diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b8a19aa52e..4787051f605 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +Release v1.35.24 (2020-11-09) +=== + +### Service Client Updates +* `service/datasync`: Updates service API and documentation +* `service/dynamodb`: Updates service API, documentation, and paginators + * This release adds supports for exporting Amazon DynamoDB table data to Amazon S3 to perform analytics at any scale. +* `service/ecs`: Updates service API and documentation + * This release provides native support for specifying Amazon FSx for Windows File Server file systems as volumes in your Amazon ECS task definitions. +* `service/es`: Updates service API, documentation, and paginators + * Adding support for package versioning in Amazon Elasticsearch Service +* `service/fsx`: Updates service API, documentation, paginators, and examples +* `service/iotanalytics`: Updates service API and documentation +* `service/macie2`: Updates service API and documentation +* `service/s3`: Updates service API, documentation, and examples + * S3 Intelligent-Tiering adds support for Archive and Deep Archive Access tiers; S3 Replication adds replication metrics and failure notifications, brings feature parity for delete marker replication +* `service/ssm`: Updates service API and documentation + * add a new filter to allow customer to filter automation executions by using resource-group which used for execute automation +* `service/storagegateway`: Updates service API, documentation, and paginators + * Added bandwidth rate limit schedule for Tape and Volume Gateways + Release v1.35.23 (2020-11-06) === diff --git a/aws/version.go b/aws/version.go index c9b74057244..cc41ca0bcda 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.35.23" +const SDKVersion = "1.35.24" diff --git a/models/apis/datasync/2018-11-09/api-2.json b/models/apis/datasync/2018-11-09/api-2.json index cbeb259576a..7dada010c30 100644 --- a/models/apis/datasync/2018-11-09/api-2.json +++ b/models/apis/datasync/2018-11-09/api-2.json @@ -416,6 +416,19 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ] + }, + "UpdateTaskExecution":{ + "name":"UpdateTaskExecution", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTaskExecutionRequest"}, + "output":{"shape":"UpdateTaskExecutionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] } }, "shapes":{ @@ -1611,6 +1624,22 @@ "members":{ } }, + "UpdateTaskExecutionRequest":{ + "type":"structure", + "required":[ + "TaskExecutionArn", + "Options" + ], + "members":{ + "TaskExecutionArn":{"shape":"TaskExecutionArn"}, + "Options":{"shape":"Options"} + } + }, + "UpdateTaskExecutionResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateTaskRequest":{ "type":"structure", "required":["TaskArn"], diff --git a/models/apis/datasync/2018-11-09/docs-2.json b/models/apis/datasync/2018-11-09/docs-2.json index 9adab00d452..9a164cbad13 100644 --- a/models/apis/datasync/2018-11-09/docs-2.json +++ b/models/apis/datasync/2018-11-09/docs-2.json @@ -32,7 +32,8 @@ "TagResource": "

Applies a key-value pair to an AWS resource.

", "UntagResource": "

Removes a tag from an AWS resource.

", "UpdateAgent": "

Updates the name of an agent.

", - "UpdateTask": "

Updates the metadata associated with a task.

" + "UpdateTask": "

Updates the metadata associated with a task.

", + "UpdateTaskExecution": "

Updates execution of a task.

You can modify bandwidth throttling for a task execution that is running or queued. For more information, see Adjusting Bandwidth Throttling for a Task Execution.

The only Option that can be modified by UpdateTaskExecution is BytesPerSecond.

" }, "shapes": { "ActivationKey": { @@ -57,10 +58,10 @@ "base": null, "refs": { "CreateLocationObjectStorageRequest$AgentArns": "

The Amazon Resource Names (ARNs) of the agents associated with the self-managed object storage server location.

", - "CreateLocationS3Request$AgentArns": "

If you are using DataSync on an AWS Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your AWS Outpost. For more information about launching a DataSync agent on an Amazon Outpost, see outposts-agent.

", + "CreateLocationS3Request$AgentArns": "

If you are using DataSync on an AWS Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your Outpost. For more information about launching a DataSync agent on an AWS Outpost, see outposts-agent.

", "CreateLocationSmbRequest$AgentArns": "

The Amazon Resource Names (ARNs) of agents to use for a Server Message Block (SMB) location.

", "DescribeLocationObjectStorageResponse$AgentArns": "

The Amazon Resource Names (ARNs) of the agents associated with the self-managed object storage server location.

", - "DescribeLocationS3Response$AgentArns": "

If you are using DataSync on an Amazon Outpost, the Amazon Resource Name (ARNs) of the EC2 agents deployed on your AWS Outpost. For more information about launching a DataSync agent on an Amazon Outpost, see outposts-agent.

", + "DescribeLocationS3Response$AgentArns": "

If you are using DataSync on an AWS Outpost, the Amazon Resource Names (ARNs) of the EC2 agents deployed on your Outpost. For more information about launching a DataSync agent on an AWS Outpost, see outposts-agent.

", "DescribeLocationSmbResponse$AgentArns": "

The Amazon Resource Name (ARN) of the source SMB file system location that is created.

", "OnPremConfig$AgentArns": "

ARNs of the agents to use for an NFS location.

" } @@ -383,7 +384,7 @@ "FilterList": { "base": null, "refs": { - "CreateTaskRequest$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"

", + "CreateTaskRequest$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", "DescribeTaskExecutionResponse$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"

", "DescribeTaskExecutionResponse$Includes": "

A list of filter rules that determines which files to include when running a task. The list should contain a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"

", "DescribeTaskResponse$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"

", @@ -647,7 +648,7 @@ "NfsSubdirectory": { "base": null, "refs": { - "CreateLocationNfsRequest$Subdirectory": "

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server. run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

If you are copying data to or from your AWS Snowcone device, see NFS Server on AWS Snowcone for more information.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" + "CreateLocationNfsRequest$Subdirectory": "

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync to transfer allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

If you are copying data to or from your AWS Snowcone device, see NFS Server on AWS Snowcone for more information.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" } }, "NfsVersion": { @@ -710,6 +711,7 @@ "DescribeTaskExecutionResponse$Options": null, "DescribeTaskResponse$Options": "

The set of configuration options that control the behavior of a single execution of the task that occurs when you call StartTaskExecution. You can configure these options to preserve metadata such as user ID (UID) and group (GID), file permissions, data integrity verification, and so on.

For each individual task execution, you can override these options by specifying an OverrideOptions value when you call the StartTaskExecution operation.

", "StartTaskExecutionRequest$OverrideOptions": null, + "UpdateTaskExecutionRequest$Options": null, "UpdateTaskRequest$Options": null } }, @@ -743,8 +745,8 @@ "base": null, "refs": { "TaskExecutionResultDetail$PrepareStatus": "

The status of the PREPARING phase.

", - "TaskExecutionResultDetail$TransferStatus": "

The status of the TRANSFERRING Phase.

", - "TaskExecutionResultDetail$VerifyStatus": "

The status of the VERIFYING Phase.

" + "TaskExecutionResultDetail$TransferStatus": "

The status of the TRANSFERRING phase.

", + "TaskExecutionResultDetail$VerifyStatus": "

The status of the VERIFYING phase.

" } }, "PosixPermissions": { @@ -774,7 +776,7 @@ "S3BucketArn": { "base": null, "refs": { - "CreateLocationS3Request$S3BucketArn": "

The Amazon Resource Name (ARN) of the Amazon S3 bucket. If the bucket is on an AWS Outpost, this must be an access point ARN.

" + "CreateLocationS3Request$S3BucketArn": "

The ARN of the Amazon S3 bucket. If the bucket is on an AWS Outpost, this must be an access point ARN.

" } }, "S3Config": { @@ -787,8 +789,8 @@ "S3StorageClass": { "base": null, "refs": { - "CreateLocationS3Request$S3StorageClass": "

The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. For buckets in AWS Regions, the storage class defaults to Standard. For buckets on AWS Outposts, the storage class defaults to AWS S3 Outposts.

For more information about S3 storage classes, see Amazon S3 Storage Classes in the Amazon Simple Storage Service Developer Guide. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

", - "DescribeLocationS3Response$S3StorageClass": "

The Amazon S3 storage class that you chose to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes in the Amazon Simple Storage Service Developer Guide. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" + "CreateLocationS3Request$S3StorageClass": "

The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. For buckets in AWS Regions, the storage class defaults to Standard. For buckets on AWS Outposts, the storage class defaults to AWS S3 Outposts.

For more information about S3 storage classes, see Amazon S3 Storage Classes. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

", + "DescribeLocationS3Response$S3StorageClass": "

The Amazon S3 storage class that you chose to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" } }, "S3Subdirectory": { @@ -945,7 +947,8 @@ "DescribeTaskExecutionResponse$TaskExecutionArn": "

The Amazon Resource Name (ARN) of the task execution that was described. TaskExecutionArn is hierarchical and includes TaskArn for the task that was executed.

For example, a TaskExecution value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2.

", "DescribeTaskResponse$CurrentTaskExecutionArn": "

The Amazon Resource Name (ARN) of the task execution that is syncing files.

", "StartTaskExecutionResponse$TaskExecutionArn": "

The Amazon Resource Name (ARN) of the specific task execution that was started.

", - "TaskExecutionListEntry$TaskExecutionArn": "

The Amazon Resource Name (ARN) of the task that was executed.

" + "TaskExecutionListEntry$TaskExecutionArn": "

The Amazon Resource Name (ARN) of the task that was executed.

", + "UpdateTaskExecutionRequest$TaskExecutionArn": "

The Amazon Resource Name (ARN) of the specific task execution that is being updated.

" } }, "TaskExecutionList": { @@ -1020,7 +1023,7 @@ "TaskStatus": { "base": null, "refs": { - "DescribeTaskResponse$Status": "

The status of the task that was described.

For detailed information about task execution statuses, see Understanding Task Statuses in the AWS DataSync User Guide.

", + "DescribeTaskResponse$Status": "

The status of the task that was described.

For detailed information about task execution statuses, see Understanding Task Statuses in the AWS DataSync User Guide.

", "TaskListEntry$Status": "

The status of the task.

" } }, @@ -1071,6 +1074,16 @@ "refs": { } }, + "UpdateTaskExecutionRequest": { + "base": null, + "refs": { + } + }, + "UpdateTaskExecutionResponse": { + "base": null, + "refs": { + } + }, "UpdateTaskRequest": { "base": "

UpdateTaskResponse

", "refs": { diff --git a/models/apis/dynamodb/2012-08-10/api-2.json b/models/apis/dynamodb/2012-08-10/api-2.json index ea1d2817fa1..b86bc7453de 100644 --- a/models/apis/dynamodb/2012-08-10/api-2.json +++ b/models/apis/dynamodb/2012-08-10/api-2.json @@ -207,6 +207,20 @@ "output":{"shape":"DescribeEndpointsResponse"}, "endpointoperation":true }, + "DescribeExport":{ + "name":"DescribeExport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExportInput"}, + "output":{"shape":"DescribeExportOutput"}, + "errors":[ + {"shape":"ExportNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, "DescribeGlobalTable":{ "name":"DescribeGlobalTable", "http":{ @@ -294,6 +308,23 @@ "endpointdiscovery":{ } }, + "ExportTableToPointInTime":{ + "name":"ExportTableToPointInTime", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportTableToPointInTimeInput"}, + "output":{"shape":"ExportTableToPointInTimeOutput"}, + "errors":[ + {"shape":"TableNotFoundException"}, + {"shape":"PointInTimeRecoveryUnavailableException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidExportTimeException"}, + {"shape":"ExportConflictException"}, + {"shape":"InternalServerError"} + ] + }, "GetItem":{ "name":"GetItem", "http":{ @@ -338,6 +369,19 @@ {"shape":"InternalServerError"} ] }, + "ListExports":{ + "name":"ListExports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListExportsInput"}, + "output":{"shape":"ListExportsOutput"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InternalServerError"} + ] + }, "ListGlobalTables":{ "name":"ListGlobalTables", "http":{ @@ -988,6 +1032,10 @@ "max":25, "min":1 }, + "BilledSizeBytes":{ + "type":"long", + "min":0 + }, "BillingMode":{ "type":"string", "enum":[ @@ -1036,6 +1084,7 @@ "max":36, "min":1 }, + "ClientToken":{"type":"string"}, "Code":{"type":"string"}, "ComparisonOperator":{ "type":"string", @@ -1416,6 +1465,19 @@ "Endpoints":{"shape":"Endpoints"} } }, + "DescribeExportInput":{ + "type":"structure", + "required":["ExportArn"], + "members":{ + "ExportArn":{"shape":"ExportArn"} + } + }, + "DescribeExportOutput":{ + "type":"structure", + "members":{ + "ExportDescription":{"shape":"ExportDescription"} + } + }, "DescribeGlobalTableInput":{ "type":"structure", "required":["GlobalTableName"], @@ -1529,6 +1591,107 @@ "AttributeValueList":{"shape":"AttributeValueList"} } }, + "ExportArn":{ + "type":"string", + "max":1024, + "min":37 + }, + "ExportConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ExportDescription":{ + "type":"structure", + "members":{ + "ExportArn":{"shape":"ExportArn"}, + "ExportStatus":{"shape":"ExportStatus"}, + "StartTime":{"shape":"ExportStartTime"}, + "EndTime":{"shape":"ExportEndTime"}, + "ExportManifest":{"shape":"ExportManifest"}, + "TableArn":{"shape":"TableArn"}, + "TableId":{"shape":"TableId"}, + "ExportTime":{"shape":"ExportTime"}, + "ClientToken":{"shape":"ClientToken"}, + "S3Bucket":{"shape":"S3Bucket"}, + "S3BucketOwner":{"shape":"S3BucketOwner"}, + "S3Prefix":{"shape":"S3Prefix"}, + "S3SseAlgorithm":{"shape":"S3SseAlgorithm"}, + "S3SseKmsKeyId":{"shape":"S3SseKmsKeyId"}, + "FailureCode":{"shape":"FailureCode"}, + "FailureMessage":{"shape":"FailureMessage"}, + "ExportFormat":{"shape":"ExportFormat"}, + "BilledSizeBytes":{"shape":"BilledSizeBytes"}, + "ItemCount":{"shape":"ItemCount"} + } + }, + 
"ExportEndTime":{"type":"timestamp"}, + "ExportFormat":{ + "type":"string", + "enum":[ + "DYNAMODB_JSON", + "ION" + ] + }, + "ExportManifest":{"type":"string"}, + "ExportNextToken":{"type":"string"}, + "ExportNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ExportStartTime":{"type":"timestamp"}, + "ExportStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, + "ExportSummaries":{ + "type":"list", + "member":{"shape":"ExportSummary"} + }, + "ExportSummary":{ + "type":"structure", + "members":{ + "ExportArn":{"shape":"ExportArn"}, + "ExportStatus":{"shape":"ExportStatus"} + } + }, + "ExportTableToPointInTimeInput":{ + "type":"structure", + "required":[ + "TableArn", + "S3Bucket" + ], + "members":{ + "TableArn":{"shape":"TableArn"}, + "ExportTime":{"shape":"ExportTime"}, + "ClientToken":{ + "shape":"ClientToken", + "idempotencyToken":true + }, + "S3Bucket":{"shape":"S3Bucket"}, + "S3BucketOwner":{"shape":"S3BucketOwner"}, + "S3Prefix":{"shape":"S3Prefix"}, + "S3SseAlgorithm":{"shape":"S3SseAlgorithm"}, + "S3SseKmsKeyId":{"shape":"S3SseKmsKeyId"}, + "ExportFormat":{"shape":"ExportFormat"} + } + }, + "ExportTableToPointInTimeOutput":{ + "type":"structure", + "members":{ + "ExportDescription":{"shape":"ExportDescription"} + } + }, + "ExportTime":{"type":"timestamp"}, "ExpressionAttributeNameMap":{ "type":"map", "key":{"shape":"ExpressionAttributeNameVariable"}, @@ -1541,6 +1704,7 @@ "value":{"shape":"AttributeValue"} }, "ExpressionAttributeValueVariable":{"type":"string"}, + "FailureCode":{"type":"string"}, "FailureException":{ "type":"structure", "members":{ @@ -1548,6 +1712,7 @@ "ExceptionDescription":{"shape":"ExceptionDescription"} } }, + "FailureMessage":{"type":"string"}, "FilterConditionMap":{ "type":"map", "key":{"shape":"AttributeName"}, @@ -1761,6 +1926,13 @@ "exception":true, "fault":true }, + "InvalidExportTimeException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, "InvalidRestoreTimeException":{ "type":"structure", "members":{ @@ -1929,6 +2101,26 @@ "NextToken":{"shape":"NextTokenString"} } }, + "ListExportsInput":{ + "type":"structure", + "members":{ + "TableArn":{"shape":"TableArn"}, + "MaxResults":{"shape":"ListExportsMaxLimit"}, + "NextToken":{"shape":"ExportNextToken"} + } + }, + "ListExportsMaxLimit":{ + "type":"integer", + "max":25, + "min":1 + }, + "ListExportsOutput":{ + "type":"structure", + "members":{ + "ExportSummaries":{"shape":"ExportSummaries"}, + "NextToken":{"shape":"ExportNextToken"} + } + }, "ListGlobalTablesInput":{ "type":"structure", "members":{ @@ -2561,6 +2753,21 @@ "NONE" ] }, + "S3Bucket":{"type":"string"}, + "S3BucketOwner":{"type":"string"}, + "S3Prefix":{"type":"string"}, + "S3SseAlgorithm":{ + "type":"string", + "enum":[ + "AES256", + "KMS" + ] + }, + "S3SseKmsKeyId":{ + "type":"string", + "max":2048, + "min":1 + }, "SSEDescription":{ "type":"structure", "members":{ diff --git a/models/apis/dynamodb/2012-08-10/docs-2.json b/models/apis/dynamodb/2012-08-10/docs-2.json index a407950bc17..c8cb8df5af7 100644 --- a/models/apis/dynamodb/2012-08-10/docs-2.json +++ b/models/apis/dynamodb/2012-08-10/docs-2.json @@ -14,15 +14,18 @@ "DescribeContinuousBackups": "

Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

After continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

You can call DescribeContinuousBackups at a maximum rate of 10 times per second.

", "DescribeContributorInsights": "

Returns information about contributor insights for a given table or global secondary index.

", "DescribeEndpoints": "

Returns the regional endpoint information.

", + "DescribeExport": "

Describes an existing table export.

", "DescribeGlobalTable": "

Returns information about the specified global table.

This operation only applies to Version 2017.11.29 of global tables. If you are using global tables Version 2019.11.21 you can use DescribeTable instead.

", "DescribeGlobalTableSettings": "

Describes Region-specific settings for a global table.

This operation only applies to Version 2017.11.29 of global tables.

", "DescribeLimits": "

Returns the current provisioned-capacity quotas for your AWS account in a Region, both for the Region as a whole and for any one DynamoDB table that you create there.

When you establish an AWS account, the account has initial quotas on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given Region. Also, there are per-table quotas that apply when you create a table there. For more information, see the Service, Account, and Table Quotas page in the Amazon DynamoDB Developer Guide.

Although you can increase these quotas by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those quotas imposed by your account so that you have enough time to apply for an increase before you hit a quota.

For example, you could use one of the AWS SDKs to do the following:

  1. Call DescribeLimits for a particular Region to obtain your current account quotas on provisioned capacity there.

  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that Region, and one to hold the aggregate write capacity units. Zero them both.

  3. Call ListTables to obtain a list of all your DynamoDB tables.

  4. For each table name listed by ListTables, do the following:

  5. Report the account quotas for that Region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.

This will let you see whether you are getting close to your account-level quotas.

The per-table quotas apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.

For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned capacity extremely rapidly, but the only quota that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account quotas.

DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.

The DescribeLimits Request element has no content.

", "DescribeTable": "

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

", "DescribeTableReplicaAutoScaling": "

Describes auto scaling settings across replicas of the global table at once.

This operation only applies to Version 2019.11.21 of global tables.

", "DescribeTimeToLive": "

Gives a description of the Time to Live (TTL) status on the specified table.

", + "ExportTableToPointInTime": "

Exports table data to an S3 bucket. The table must have point in time recovery enabled, and you can export data from any time within the point in time recovery window.

", "GetItem": "

The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.

GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.

", "ListBackups": "

List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page.

In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested.

You can call ListBackups a maximum of five times per second.

", "ListContributorInsights": "

Returns a list of ContributorInsightsSummary for a table and all its global secondary indexes.

", + "ListExports": "

Lists completed exports within the past 90 days.

", "ListGlobalTables": "

Lists all global tables that have a replica in the specified Region.

This operation only applies to Version 2017.11.29 of global tables.

", "ListTables": "

Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.

", "ListTagsOfResource": "

List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

", @@ -361,6 +364,12 @@ "BatchWriteItemOutput$UnprocessedItems": "

A map of tables and requests against those tables that were not processed. The UnprocessedItems value is in the same form as RequestItems, so you can provide this value directly to a subsequent BatchGetItem operation. For more information, see RequestItems in the Request Parameters section.

Each UnprocessedItems entry consists of a table name and, for that table, a list of operations to perform (DeleteRequest or PutRequest).

If there are no unprocessed items remaining, the response contains an empty UnprocessedItems map.

" } }, + "BilledSizeBytes": { + "base": null, + "refs": { + "ExportDescription$BilledSizeBytes": "

The billable size of the table export.

" + } + }, "BillingMode": { "base": null, "refs": { @@ -437,6 +446,13 @@ "TransactWriteItemsInput$ClientRequestToken": "

Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, meaning that multiple identical calls have the same effect as one single call.

Although multiple identical calls using the same client request token produce the same result on the server (no side effects), the responses to the calls might not be the same. If the ReturnConsumedCapacity parameter is set, then the initial TransactWriteItems call returns the amount of write capacity units consumed in making the changes. Subsequent TransactWriteItems calls with the same client token return the number of read capacity units consumed in reading the item.

A client request token is valid for 10 minutes after the first request that uses it is completed. After 10 minutes, any request with the same client token is treated as a new request. Do not resubmit the same request with the same client token for more than 10 minutes, or the result might not be idempotent.

If you submit a request with the same client token but a change in other parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch exception.

" } }, + "ClientToken": { + "base": null, + "refs": { + "ExportDescription$ClientToken": "

The client token that was provided for the export task. A client token makes calls to ExportTableToPointInTime idempotent, meaning that multiple identical calls have the same effect as one single call.

", + "ExportTableToPointInTimeInput$ClientToken": "

Providing a ClientToken makes the call to ExportTableToPointInTime idempotent, meaning that multiple identical calls have the same effect as one single call.

A client token is valid for 8 hours after the first request that uses it is completed. After 8 hours, any request with the same client token is treated as a new request. Do not resubmit the same request with the same client token for more than 8 hours, or the result might not be idempotent.

If you submit a request with the same client token but a change in other parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch exception.

" + } + }, "Code": { "base": null, "refs": { @@ -584,7 +600,7 @@ } }, "ContributorInsightsSummary": { - "base": "

Represents a Contributor Insights summary entry..

", + "base": "

Represents a Contributor Insights summary entry.

", "refs": { "ContributorInsightsSummaries$member": null } @@ -756,6 +772,16 @@ "refs": { } }, + "DescribeExportInput": { + "base": null, + "refs": { + } + }, + "DescribeExportOutput": { + "base": null, + "refs": { + } + }, "DescribeGlobalTableInput": { "base": null, "refs": { @@ -843,11 +869,14 @@ "CancellationReason$Message": "

Cancellation reason message description.

", "ConditionalCheckFailedException$message": "

The conditional request failed.

", "ContinuousBackupsUnavailableException$message": null, + "ExportConflictException$message": null, + "ExportNotFoundException$message": null, "GlobalTableAlreadyExistsException$message": null, "GlobalTableNotFoundException$message": null, "IdempotentParameterMismatchException$Message": null, "IndexNotFoundException$message": null, "InternalServerError$message": "

The server encountered an internal error trying to fulfill the request.

", + "InvalidExportTimeException$message": null, "InvalidRestoreTimeException$message": null, "ItemCollectionSizeLimitExceededException$message": "

The total size of an item collection has exceeded the maximum limit of 10 gigabytes.

", "LimitExceededException$message": "

Too many operations for a given subscriber.

", @@ -892,6 +921,99 @@ "ExpectedAttributeMap$value": null } }, + "ExportArn": { + "base": null, + "refs": { + "DescribeExportInput$ExportArn": "

The Amazon Resource Name (ARN) associated with the export.

", + "ExportDescription$ExportArn": "

The Amazon Resource Name (ARN) of the table export.

", + "ExportSummary$ExportArn": "

The Amazon Resource Name (ARN) of the export.

" + } + }, + "ExportConflictException": { + "base": "

There was a conflict when writing to the specified S3 bucket.

", + "refs": { + } + }, + "ExportDescription": { + "base": "

Represents the properties of the exported table.

", + "refs": { + "DescribeExportOutput$ExportDescription": "

Represents the properties of the export.

", + "ExportTableToPointInTimeOutput$ExportDescription": "

Contains a description of the table export.

" + } + }, + "ExportEndTime": { + "base": null, + "refs": { + "ExportDescription$EndTime": "

The time at which the export task completed.

" + } + }, + "ExportFormat": { + "base": null, + "refs": { + "ExportDescription$ExportFormat": "

The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON or ION.

", + "ExportTableToPointInTimeInput$ExportFormat": "

The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON or ION.

" + } + }, + "ExportManifest": { + "base": null, + "refs": { + "ExportDescription$ExportManifest": "

The name of the manifest file for the export task.

" + } + }, + "ExportNextToken": { + "base": null, + "refs": { + "ListExportsInput$NextToken": "

An optional string that, if supplied, must be copied from the output of a previous call to ListExports. When provided in this manner, the API fetches the next page of results.

", + "ListExportsOutput$NextToken": "

If this value is returned, there are additional results to be displayed. To retrieve them, call ListExports again, with NextToken set to this value.

" + } + }, + "ExportNotFoundException": { + "base": "

The specified export was not found.

", + "refs": { + } + }, + "ExportStartTime": { + "base": null, + "refs": { + "ExportDescription$StartTime": "

The time at which the export task began.

" + } + }, + "ExportStatus": { + "base": null, + "refs": { + "ExportDescription$ExportStatus": "

Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.

", + "ExportSummary$ExportStatus": "

Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.

" + } + }, + "ExportSummaries": { + "base": null, + "refs": { + "ListExportsOutput$ExportSummaries": "

A list of ExportSummary objects.

" + } + }, + "ExportSummary": { + "base": "

Summary information about an export task.

", + "refs": { + "ExportSummaries$member": null + } + }, + "ExportTableToPointInTimeInput": { + "base": null, + "refs": { + } + }, + "ExportTableToPointInTimeOutput": { + "base": null, + "refs": { + } + }, + "ExportTime": { + "base": null, + "refs": { + "ExportDescription$ExportTime": "

Point in time from which table data was exported.

", + "ExportTableToPointInTimeInput$ExportTime": "

Time in the past from which to export table data. The table export will be a snapshot of the table's state at this point in time.

" + } + }, "ExpressionAttributeNameMap": { "base": null, "refs": { @@ -935,12 +1057,24 @@ "ExpressionAttributeValueMap$key": null } }, + "FailureCode": { + "base": null, + "refs": { + "ExportDescription$FailureCode": "

Status code for the result of the failed export.

" + } + }, "FailureException": { "base": "

Represents a failure in a contributor insights operation.

", "refs": { "DescribeContributorInsightsOutput$FailureException": "

Returns information about the last failure that was encountered.

The most common exceptions for a FAILED status are:

" } }, + "FailureMessage": { + "base": null, + "refs": { + "ExportDescription$FailureMessage": "

Export failure reason description.

" + } + }, "FilterConditionMap": { "base": null, "refs": { @@ -1151,6 +1285,11 @@ "refs": { } }, + "InvalidExportTimeException": { + "base": "

The specified ExportTime is outside of the point in time recovery window.

", + "refs": { + } + }, "InvalidRestoreTimeException": { "base": "

An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime and LatestRestorableDateTime.

", "refs": { @@ -1204,6 +1343,7 @@ "ItemCount": { "base": null, "refs": { + "ExportDescription$ItemCount": "

The number of items exported.

", "SourceTableDetails$ItemCount": "

Number of items in the table. Note that this is an approximate value.

" } }, @@ -1361,6 +1501,22 @@ "refs": { } }, + "ListExportsInput": { + "base": null, + "refs": { + } + }, + "ListExportsMaxLimit": { + "base": null, + "refs": { + "ListExportsInput$MaxResults": "

Maximum number of results to return per page.

" + } + }, + "ListExportsOutput": { + "base": null, + "refs": { + } + }, "ListGlobalTablesInput": { "base": null, "refs": { @@ -1977,6 +2133,41 @@ "Update$ReturnValuesOnConditionCheckFailure": "

Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.

" } }, + "S3Bucket": { + "base": null, + "refs": { + "ExportDescription$S3Bucket": "

The name of the Amazon S3 bucket containing the export.

", + "ExportTableToPointInTimeInput$S3Bucket": "

The name of the Amazon S3 bucket to export the snapshot to.

" + } + }, + "S3BucketOwner": { + "base": null, + "refs": { + "ExportDescription$S3BucketOwner": "

The ID of the AWS account that owns the bucket containing the export.

", + "ExportTableToPointInTimeInput$S3BucketOwner": "

The ID of the AWS account that owns the bucket the export will be stored in.

" + } + }, + "S3Prefix": { + "base": null, + "refs": { + "ExportDescription$S3Prefix": "

The Amazon S3 bucket prefix used as the file name and path of the exported snapshot.

", + "ExportTableToPointInTimeInput$S3Prefix": "

The Amazon S3 bucket prefix to use as the file name and path of the exported snapshot.

" + } + }, + "S3SseAlgorithm": { + "base": null, + "refs": { + "ExportDescription$S3SseAlgorithm": "

Type of encryption used on the bucket where export data is stored. Valid values for S3SseAlgorithm are AES256 and KMS.

", + "ExportTableToPointInTimeInput$S3SseAlgorithm": "

Type of encryption used on the bucket where export data will be stored. Valid values for S3SseAlgorithm are AES256 and KMS.

" + } + }, + "S3SseKmsKeyId": { + "base": null, + "refs": { + "ExportDescription$S3SseKmsKeyId": "

The ID of the AWS KMS managed key used to encrypt the S3 bucket where export data is stored (if applicable).

", + "ExportTableToPointInTimeInput$S3SseKmsKeyId": "

The ID of the AWS KMS managed key used to encrypt the S3 bucket where export data will be stored (if applicable).

" + } + }, "SSEDescription": { "base": "

The description of the server-side encryption status on the specified table.

", "refs": { @@ -2127,6 +2318,9 @@ "base": null, "refs": { "BackupSummary$TableArn": "

ARN associated with the table.

", + "ExportDescription$TableArn": "

The Amazon Resource Name (ARN) of the table that was exported.

", + "ExportTableToPointInTimeInput$TableArn": "

The Amazon Resource Name (ARN) associated with the table to export.

", + "ListExportsInput$TableArn": "

The Amazon Resource Name (ARN) associated with the exported table.

", "RestoreSummary$SourceTableArn": "

The ARN of the source table of the backup that is being restored.

", "RestoreTableToPointInTimeInput$SourceTableArn": "

The DynamoDB table that will be restored. This value is an Amazon Resource Name (ARN).

", "SourceTableDetails$TableArn": "

ARN of the table for which the backup was created.

" @@ -2160,6 +2354,7 @@ "base": null, "refs": { "BackupSummary$TableId": "

Unique identifier for the table.

", + "ExportDescription$TableId": "

Unique ID of the table that was exported.

", "SourceTableDetails$TableId": "

Unique identifier for the table for which the backup was created.

", "TableDescription$TableId": "

Unique identifier for the table for which the backup was created.

" } diff --git a/models/apis/dynamodb/2012-08-10/paginators-1.json b/models/apis/dynamodb/2012-08-10/paginators-1.json index ab2d5c8c08f..69fadc92a5e 100644 --- a/models/apis/dynamodb/2012-08-10/paginators-1.json +++ b/models/apis/dynamodb/2012-08-10/paginators-1.json @@ -9,6 +9,11 @@ "limit_key": "MaxResults", "output_token": "NextToken" }, + "ListExports": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "ListTables": { "input_token": "ExclusiveStartTableName", "limit_key": "Limit", diff --git a/models/apis/ecs/2014-11-13/api-2.json b/models/apis/ecs/2014-11-13/api-2.json index d64f4dbb693..226da77a397 100644 --- a/models/apis/ecs/2014-11-13/api-2.json +++ b/models/apis/ecs/2014-11-13/api-2.json @@ -1715,6 +1715,30 @@ "type":"list", "member":{"shape":"KeyValuePair"} }, + "FSxWindowsFileServerAuthorizationConfig":{ + "type":"structure", + "required":[ + "credentialsParameter", + "domain" + ], + "members":{ + "credentialsParameter":{"shape":"String"}, + "domain":{"shape":"String"} + } + }, + "FSxWindowsFileServerVolumeConfiguration":{ + "type":"structure", + "required":[ + "fileSystemId", + "rootDirectory", + "authorizationConfig" + ], + "members":{ + "fileSystemId":{"shape":"String"}, + "rootDirectory":{"shape":"String"}, + "authorizationConfig":{"shape":"FSxWindowsFileServerAuthorizationConfig"} + } + }, "Failure":{ "type":"structure", "members":{ @@ -3216,7 +3240,8 @@ "name":{"shape":"String"}, "host":{"shape":"HostVolumeProperties"}, "dockerVolumeConfiguration":{"shape":"DockerVolumeConfiguration"}, - "efsVolumeConfiguration":{"shape":"EFSVolumeConfiguration"} + "efsVolumeConfiguration":{"shape":"EFSVolumeConfiguration"}, + "fsxWindowsFileServerVolumeConfiguration":{"shape":"FSxWindowsFileServerVolumeConfiguration"} } }, "VolumeFrom":{ diff --git a/models/apis/ecs/2014-11-13/docs-2.json b/models/apis/ecs/2014-11-13/docs-2.json index e1c04045960..28da8cfed14 100644 --- a/models/apis/ecs/2014-11-13/docs-2.json +++ b/models/apis/ecs/2014-11-13/docs-2.json @@ -162,16 +162,16 @@ "base": null, "refs": { "ContainerDefinition$essential": "

If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, then its failure does not affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

All tasks must have at least one essential container. If you have an application that is composed of multiple containers, you should group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.

", - "ContainerDefinition$disableNetworking": "

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers.

", - "ContainerDefinition$privileged": "

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

", - "ContainerDefinition$readonlyRootFilesystem": "

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

", - "ContainerDefinition$interactive": "

When this parameter is true, this allows you to deploy containerized applications that require stdin or a tty to be allocated. This parameter maps to OpenStdin in the Create a container section of the Docker Remote API and the --interactive option to docker run.

", - "ContainerDefinition$pseudoTerminal": "

When this parameter is true, a TTY is allocated. This parameter maps to Tty in the Create a container section of the Docker Remote API and the --tty option to docker run.

", + "ContainerDefinition$disableNetworking": "

When this parameter is true, networking is disabled within the container. This parameter maps to NetworkDisabled in the Create a container section of the Docker Remote API.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

", + "ContainerDefinition$privileged": "

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks using the Fargate launch type.

", + "ContainerDefinition$readonlyRootFilesystem": "

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

", + "ContainerDefinition$interactive": "

When this parameter is true, this allows you to deploy containerized applications that require stdin or a tty to be allocated. This parameter maps to OpenStdin in the Create a container section of the Docker Remote API and the --interactive option to docker run.

", + "ContainerDefinition$pseudoTerminal": "

When this parameter is true, a TTY is allocated. This parameter maps to Tty in the Create a container section of the Docker Remote API and the --tty option to docker run.

", "DeleteServiceRequest$force": "

If true, allows you to delete a service even if it has not been scaled down to zero tasks. It is only necessary to use this if the service is using the REPLICA scheduling strategy.

", "DeleteTaskSetRequest$force": "

If true, this allows you to delete a task set even if it hasn't been scaled down to zero.

", "DeregisterContainerInstanceRequest$force": "

Forces the deregistration of the container instance. If you have tasks running on the container instance when you deregister it with the force option, these tasks remain running until you terminate the instance or the tasks stop through some other means, but they are orphaned (no longer monitored or accounted for by Amazon ECS). If an orphaned task on your container instance is part of an Amazon ECS service, then the service scheduler starts another copy of that task, on a different container instance if possible.

Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer target group are deregistered. They begin connection draining according to the settings on the load balancer or target group.

", "DockerVolumeConfiguration$autoprovision": "

If this value is true, the Docker volume is created if it does not already exist.

This field is only used if the scope is shared.

", - "LinuxParameters$initProcessEnabled": "

Run an init process inside the container that forwards signals and reaps processes. This parameter maps to the --init option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

", + "LinuxParameters$initProcessEnabled": "

Run an init process inside the container that forwards signals and reaps processes. This parameter maps to the --init option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

", "MountPoint$readOnly": "

If this value is true, the container has read-only access to the volume. If this value is false, then the container can write to the volume. The default value is false.

", "VolumeFrom$readOnly": "

If this value is true, the container has read-only access to the volume. If this value is false, then the container can write to the volume. The default value is false.

" } @@ -180,9 +180,9 @@ "base": null, "refs": { "Container$exitCode": "

The exit code returned from the container.

", - "ContainerDefinition$memory": "

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

", - "ContainerDefinition$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

", - "ContainerDefinition$startTimeout": "

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED state.

For tasks using the Fargate launch type, this parameter requires that the task or service uses platform version 1.3.0 or later. If this parameter is not specified, the default value of 3 minutes is used.

For tasks using the EC2 launch type, if the startTimeout parameter is not specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_START_TIMEOUT is used by default. If neither the startTimeout parameter or the ECS_CONTAINER_START_TIMEOUT agent configuration variable are set, then the default values of 3 minutes for Linux containers and 8 minutes on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

", + "ContainerDefinition$memory": "

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

", + "ContainerDefinition$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.
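As a rough illustration of how the memory and memoryReservation parameters surface in aws-sdk-go, the sketch below registers a hypothetical task definition whose container uses the 128 MiB soft limit / 300 MiB hard limit bursting example above; the family name and image are placeholders, not values from this changelog.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Illustrative only: a container with a 128 MiB soft limit (memoryReservation)
	// and a 300 MiB hard limit (memory), matching the bursting example above.
	input := &ecs.RegisterTaskDefinitionInput{
		Family: aws.String("example-web"), // placeholder family name
		ContainerDefinitions: []*ecs.ContainerDefinition{{
			Name:              aws.String("web"),
			Image:             aws.String("nginx:latest"), // placeholder image
			MemoryReservation: aws.Int64(128),             // soft limit in MiB
			Memory:            aws.Int64(300),             // hard limit in MiB; must exceed memoryReservation
			Essential:         aws.Bool(true),
		}},
	}
	// The actual call would be ecs.New(sess).RegisterTaskDefinition(input);
	// here we only print the request shape.
	fmt.Println(input)
}
```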

", + "ContainerDefinition$startTimeout": "

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time, then containerA gives up and does not start. This results in the task transitioning to a STOPPED state.

When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it is enforced independently of this start timeout value.

For tasks using the Fargate launch type, this parameter requires that the task or service uses platform version 1.3.0 or later.

For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

", "ContainerDefinition$stopTimeout": "

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.

For tasks using the Fargate launch type, the task or service requires platform version 1.3.0 or later. The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.

For tasks using the EC2 launch type, if the stopTimeout parameter is not specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used by default. If neither the stopTimeout parameter nor the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable is set, then the default value of 30 seconds is used for both Linux and Windows containers. Your container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
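A minimal aws-sdk-go sketch of how a container dependency together with start and stop timeouts might be expressed; the container names, images, and timeout values are illustrative, not taken from this changelog.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// containerA waits up to 120 seconds for containerB to report HEALTHY;
	// containerB gets 30 seconds to exit cleanly when the task is stopped.
	containerB := &ecs.ContainerDefinition{
		Name:         aws.String("containerB"),
		Image:        aws.String("example/backend:latest"), // placeholder image
		StartTimeout: aws.Int64(120),
		StopTimeout:  aws.Int64(30),
	}
	containerA := &ecs.ContainerDefinition{
		Name:  aws.String("containerA"),
		Image: aws.String("example/frontend:latest"), // placeholder image
		DependsOn: []*ecs.ContainerDependency{{
			ContainerName: aws.String("containerB"),
			Condition:     aws.String("HEALTHY"),
		}},
	}
	fmt.Println(containerA, containerB)
}
```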

", "ContainerOverride$cpu": "

The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name.

", "ContainerOverride$memory": "

The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.

", @@ -198,9 +198,9 @@ "HealthCheck$timeout": "

The time period in seconds to wait for a health check to succeed before it is considered a failure. You may specify between 2 and 60 seconds. The default value is 5.

", "HealthCheck$retries": "

The number of times to retry a failed health check before the container is considered unhealthy. You may specify between 1 and 10 retries. The default value is 3.

", "HealthCheck$startPeriod": "

The optional grace period within which to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You may specify between 0 and 300 seconds. The startPeriod is disabled by default.

If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.
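To make the interval, timeout, retries, and startPeriod values above concrete, here is a hedged aws-sdk-go sketch of a container health check; the command and numbers are placeholders chosen within the documented ranges.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// A shell-based health check: 30-second interval, 5-second timeout,
	// up to 3 retries, and a 60-second grace period before failures count.
	hc := &ecs.HealthCheck{
		Command:     aws.StringSlice([]string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}),
		Interval:    aws.Int64(30),
		Timeout:     aws.Int64(5),
		Retries:     aws.Int64(3),
		StartPeriod: aws.Int64(60),
	}
	fmt.Println(hc)
}
```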

", - "LinuxParameters$sharedMemorySize": "

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

", - "LinuxParameters$maxSwap": "

The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the --memory-swap option to docker run where the value would be the sum of the container memory plus the maxSwap value.

If a maxSwap value of 0 is specified, the container will not use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container will use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

If you are using tasks that use the Fargate launch type, the maxSwap parameter is not supported.

", - "LinuxParameters$swappiness": "

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 will cause swapping to not happen unless absolutely necessary. A swappiness value of 100 will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

If you are using tasks that use the Fargate launch type, the swappiness parameter is not supported.

", + "LinuxParameters$sharedMemorySize": "

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

If you are using tasks that use the Fargate launch type, the sharedMemorySize parameter is not supported.

", + "LinuxParameters$maxSwap": "

The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the --memory-swap option to docker run where the value would be the sum of the container memory plus the maxSwap value.

If a maxSwap value of 0 is specified, the container will not use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container will use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

If you are using tasks that use the Fargate launch type, the maxSwap parameter is not supported.

", + "LinuxParameters$swappiness": "

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 will cause swapping to not happen unless absolutely necessary. A swappiness value of 100 will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter is not specified, a default value of 60 is used. If a value is not specified for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness option to docker run.

If you are using tasks that use the Fargate launch type, the swappiness parameter is not supported.
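A small aws-sdk-go sketch showing how sharedMemorySize, maxSwap, and swappiness fit together on a LinuxParameters block for a container using the EC2 launch type (none of the three are supported on Fargate); the values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Swappiness only takes effect because maxSwap is set; a maxSwap of 0
	// would disable swap for the container entirely.
	lp := &ecs.LinuxParameters{
		SharedMemorySize: aws.Int64(256), // size of /dev/shm in MiB
		MaxSwap:          aws.Int64(512), // total swap the container may use, in MiB
		Swappiness:       aws.Int64(60),  // matches the documented default of 60
	}
	fmt.Println(lp)
}
```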

", "ListAttributesRequest$maxResults": "

The maximum number of attribute results returned by ListAttributes in paginated output. When this parameter is used, ListAttributes only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListAttributes request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListAttributes returns up to 100 results and a nextToken value if applicable.

", "ListClustersRequest$maxResults": "

The maximum number of cluster results returned by ListClusters in paginated output. When this parameter is used, ListClusters only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListClusters request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListClusters returns up to 100 results and a nextToken value if applicable.

", "ListContainerInstancesRequest$maxResults": "

The maximum number of container instance results returned by ListContainerInstances in paginated output. When this parameter is used, ListContainerInstances only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListContainerInstances request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListContainerInstances returns up to 100 results and a nextToken value if applicable.

", @@ -211,7 +211,7 @@ "LoadBalancer$containerPort": "

The port on the container to associate with the load balancer. This port must correspond to a containerPort in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they are launched on must allow ingress traffic on the hostPort of the port mapping.

", "NetworkBinding$containerPort": "

The port number on the container that is used with the network binding.

", "NetworkBinding$hostPort": "

The port number on the host that is used with the network binding.

", - "PortMapping$containerPort": "

The port number on the container that is bound to the user-specified or automatically assigned host port.

If you are using containers in a task with the awsvpc or host network mode, exposed ports should be specified using containerPort.

If you are using containers in a task with the bridge network mode and you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range. For more information, see hostPort. Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.

You cannot expose the same container port for multiple protocols. An error will be returned if this is attempted.

", + "PortMapping$containerPort": "

The port number on the container that is bound to the user-specified or automatically assigned host port.

If you are using containers in a task with the awsvpc or host network mode, exposed ports should be specified using containerPort.

If you are using containers in a task with the bridge network mode and you specify a container port and not a host port, your container automatically receives a host port in the ephemeral port range. For more information, see hostPort. Port mappings that are automatically assigned in this way do not count toward the 100 reserved ports limit of a container instance.

", "PortMapping$hostPort": "

The port number on the container instance to reserve for your container.

If you are using containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort.

If you are using containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

The default ephemeral port range from 49153 through 65535 is always used for Docker versions before 1.6.0.

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running (after a task stops, the host port is released). The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time, including the default reserved ports. Automatically assigned ports don't count toward the 100 reserved ports limit.
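These host/container port rules look roughly like this in aws-sdk-go: the first bridge-mode mapping below omits hostPort so the container instance assigns an ephemeral host port, while the second pins a specific, non-reserved host port. Port numbers are illustrative.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// bridge network mode: omitting HostPort lets the container instance
	// assign a host port from the ephemeral range.
	ephemeral := &ecs.PortMapping{
		ContainerPort: aws.Int64(8080),
		Protocol:      aws.String("tcp"),
	}
	// Pinning a specific, non-reserved host port instead.
	pinned := &ecs.PortMapping{
		ContainerPort: aws.Int64(8080),
		HostPort:      aws.Int64(8080),
		Protocol:      aws.String("tcp"),
	}
	fmt.Println(ephemeral, pinned)
}
```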

", "RunTaskRequest$count": "

The number of instantiations of the specified task to place on your cluster. You can specify up to 10 tasks per call.

", "Service$healthCheckGracePeriodSeconds": "

The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started.

", @@ -375,7 +375,7 @@ "CompatibilityList": { "base": null, "refs": { - "RegisterTaskDefinitionRequest$requiresCompatibilities": "

The launch type required by the task. If no value is specified, it defaults to EC2.

", + "RegisterTaskDefinitionRequest$requiresCompatibilities": "

The task launch type that Amazon ECS should validate the task definition against. This ensures that the task definition parameters are compatible with the specified launch type. If no value is specified, it defaults to EC2.

", "TaskDefinition$compatibilities": "

The launch type to use with your task. For more information, see Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.

", "TaskDefinition$requiresCompatibilities": "

The launch type the task requires. If no value is specified, it will default to EC2. Valid values include EC2 and FARGATE.

" } @@ -395,7 +395,7 @@ "ContainerCondition": { "base": null, "refs": { - "ContainerDependency$condition": "

The dependency condition of the container. The following are the available conditions and their behavior:

" + "ContainerDependency$condition": "

The dependency condition of the container. The following are the available conditions and their behavior:

" } }, "ContainerDefinition": { @@ -738,7 +738,7 @@ "DevicesList": { "base": null, "refs": { - "LinuxParameters$devices": "

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" + "LinuxParameters$devices": "

Any host devices to expose to the container. This parameter maps to Devices in the Create a container section of the Docker Remote API and the --device option to docker run.

If you are using tasks that use the Fargate launch type, the devices parameter is not supported.

" } }, "DiscoverPollEndpointRequest": { @@ -754,7 +754,7 @@ "DockerLabelsMap": { "base": null, "refs": { - "ContainerDefinition$dockerLabels": "

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "ContainerDefinition$dockerLabels": "

A key/value map of labels to add to the container. This parameter maps to Labels in the Create a container section of the Docker Remote API and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" } }, "DockerVolumeConfiguration": { @@ -809,19 +809,31 @@ "EnvironmentFiles": { "base": null, "refs": { - "ContainerDefinition$environmentFiles": "

A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file option to docker run.

You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information on the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they are processed from the top down. It is recommended to use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

This field is not valid for containers in tasks using the Fargate launch type.

", + "ContainerDefinition$environmentFiles": "

A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file option to docker run.

You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information on the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they are processed from the top down. It is recommended to use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.

This field is not valid for containers in tasks using the Fargate launch type.
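A hedged sketch of how an environment file reference appears on a container definition in aws-sdk-go; the bucket and object key below are placeholders, and the referenced object must be a .env file in Amazon S3.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Environment files are referenced by S3 object ARN; variables defined
	// directly in the environment parameter take precedence over these values.
	def := &ecs.ContainerDefinition{
		Name:  aws.String("app"),
		Image: aws.String("example/app:latest"), // placeholder image
		EnvironmentFiles: []*ecs.EnvironmentFile{{
			Type:  aws.String("s3"),
			Value: aws.String("arn:aws:s3:::example-bucket/app.env"), // placeholder ARN
		}},
	}
	fmt.Println(def)
}
```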

", "ContainerOverride$environmentFiles": "

A list of files containing the environment variables to pass to a container, instead of the value from the container definition.

" } }, "EnvironmentVariables": { "base": null, "refs": { - "ContainerDefinition$environment": "

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

", + "ContainerDefinition$environment": "

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We do not recommend using plaintext environment variables for sensitive information, such as credential data.

", "ContainerOverride$environment": "

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. You must also specify a container name.

" } }, + "FSxWindowsFileServerAuthorizationConfig": { + "base": "

The authorization configuration details for the Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon Elastic Container Service API Reference.

For more information and the input format, see Amazon FSx for Windows File Server Volumes in the Amazon Elastic Container Service Developer Guide.

", + "refs": { + "FSxWindowsFileServerVolumeConfiguration$authorizationConfig": "

The authorization configuration details for the Amazon FSx for Windows File Server file system.

" + } + }, + "FSxWindowsFileServerVolumeConfiguration": { + "base": "

This parameter is specified when you are using an Amazon FSx for Windows File Server file system for task storage.

For more information and the input format, see Amazon FSx for Windows File Server Volumes in the Amazon Elastic Container Service Developer Guide.

", + "refs": { + "Volume$fsxWindowsFileServerVolumeConfiguration": "

This parameter is specified when you are using an Amazon FSx for Windows File Server file system for task storage.
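Since this release adds the FSx for Windows File Server volume type, here is a hedged aws-sdk-go sketch of the new shapes; the file system ID, Secrets Manager ARN, domain, and share path are placeholders, and the Go field name on Volume is assumed to follow the SDK's usual member-name casing.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// A task volume backed by FSx for Windows File Server, authorized via a
	// credentials secret and an Active Directory domain (all values are placeholders).
	vol := &ecs.Volume{
		Name: aws.String("fsx-share"),
		FsxWindowsFileServerVolumeConfiguration: &ecs.FSxWindowsFileServerVolumeConfiguration{
			FileSystemId:  aws.String("fs-0123456789abcdef0"),
			RootDirectory: aws.String("share"),
			AuthorizationConfig: &ecs.FSxWindowsFileServerAuthorizationConfig{
				CredentialsParameter: aws.String("arn:aws:secretsmanager:us-east-1:111122223333:secret:fsx-creds"),
				Domain:               aws.String("example.corp.internal"),
			},
		},
	}
	fmt.Println(vol)
}
```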

" + } + }, "Failure": { - "base": "

A failed resource.

", + "base": "

A failed resource. For a list of common causes, see API failure reasons in the Amazon Elastic Container Service Developer Guide.

", "refs": { "Failures$member": null } @@ -867,7 +879,7 @@ "HealthCheck": { "base": "

An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).

You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

The following describes the possible healthStatus values for a container:

The following describes the possible healthStatus values for a task. The container health check status of nonessential containers do not have an effect on the health status of a task.

If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

The following are notes about container health check support:

", "refs": { - "ContainerDefinition$healthCheck": "

The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" + "ContainerDefinition$healthCheck": "

The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck in the Create a container section of the Docker Remote API and the HEALTHCHECK parameter of docker run.

" } }, "HealthStatus": { @@ -886,7 +898,7 @@ "HostEntryList": { "base": null, "refs": { - "ContainerDefinition$extraHosts": "

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" + "ContainerDefinition$extraHosts": "

A list of hostnames and IP address mappings to append to the /etc/hosts file on the container. This parameter maps to ExtraHosts in the Create a container section of the Docker Remote API and the --add-host option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" } }, "HostVolumeProperties": { @@ -928,7 +940,7 @@ "Cluster$runningTasksCount": "

The number of tasks in the cluster that are in the RUNNING state.

", "Cluster$pendingTasksCount": "

The number of tasks in the cluster that are in the PENDING state.

", "Cluster$activeServicesCount": "

The number of services that are running on the cluster in an ACTIVE state. You can view these services with ListServices.

", - "ContainerDefinition$cpu": "

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition.

", + "ContainerDefinition$cpu": "

The number of cpu units reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run.

This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu value.

You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.

Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that is the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task would be guaranteed a minimum of 512 CPU units when needed, and each container could float to higher CPU usage if the other container was not using it, but if both tasks were 100% active all of the time, they would be limited to 512 CPU units.

On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter is not required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:

On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that is described in the task definition. A null or zero CPU value is passed to Docker as 0, which Windows interprets as 1% of one CPU.

", "ContainerInstance$runningTasksCount": "

The number of tasks on the container instance that are in the RUNNING status.

", "ContainerInstance$pendingTasksCount": "

The number of tasks on the container instance that are in the PENDING status.

", "Deployment$desiredCount": "

The most recent desired count of tasks that was specified for the service to deploy or maintain.

", @@ -943,7 +955,7 @@ "TaskSet$computedDesiredCount": "

The computed desired count for the task set. This is calculated by multiplying the service's desiredCount by the task set's scale percentage. The result is always rounded up. For example, if the computed desired count is 1.2, it rounds up to 2 tasks.

", "TaskSet$pendingCount": "

The number of tasks in the task set that are in the PENDING status during a deployment. A task in the PENDING state is preparing to enter the RUNNING state. A task set enters the PENDING status when it launches for the first time or when it is restarted after being in the STOPPED state.

", "TaskSet$runningCount": "

The number of tasks in the task set that are in the RUNNING status during a deployment. A task in the RUNNING state is running and ready for use.

", - "Tmpfs$size": "

The size (in MiB) of the tmpfs volume.

", + "Tmpfs$size": "

The maximum size (in MiB) of the tmpfs volume.

", "Ulimit$softLimit": "

The soft limit for the ulimit type.

", "Ulimit$hardLimit": "

The hard limit for the ulimit type.

" } @@ -1099,16 +1111,16 @@ "LoadBalancers": { "base": null, "refs": { - "CreateServiceRequest$loadBalancers": "

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you can specify multiple target groups to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.

", + "CreateServiceRequest$loadBalancers": "

A load balancer object representing the load balancers to use with your service. For more information, see Service Load Balancing in the Amazon Elastic Container Service Developer Guide.

If the service is using the rolling update (ECS) deployment controller and using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach to the service. The service-linked role is required for services that make use of multiple target groups. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If the service is using the CODE_DEPLOY deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When creating an AWS CodeDeploy deployment group, you specify two target groups (referred to as a targetGroupPair). During a deployment, AWS CodeDeploy determines which task set in your service has the status PRIMARY and associates one target group with it, and then associates the other target group with the replacement task set. The load balancer can also have up to two listeners: a required listener for production traffic and an optional listener that allows you to perform validation tests with Lambda functions before routing production traffic to it.

After you create a service using the ECS deployment controller, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable. If you are using the CODE_DEPLOY deployment controller, these values can be changed when updating the service.

For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. The load balancer name parameter must be omitted. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.

For Classic Load Balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. The target group ARN parameter must be omitted. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.

Services with tasks that use the awsvpc network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ip as the target type, not instance, because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance.
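A minimal aws-sdk-go sketch of the target-group form described above, for a service using the ECS deployment controller with an Application or Network Load Balancer; the ARNs, names, and port are placeholders and the API call itself is omitted.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// For an ALB/NLB, set TargetGroupArn and leave LoadBalancerName unset;
	// for a Classic Load Balancer it would be the other way around.
	input := &ecs.CreateServiceInput{
		ServiceName:    aws.String("web-service"), // placeholder
		TaskDefinition: aws.String("example-web"), // placeholder family
		DesiredCount:   aws.Int64(2),
		LoadBalancers: []*ecs.LoadBalancer{{
			TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/example/0123456789abcdef"), // placeholder
			ContainerName:  aws.String("web"),
			ContainerPort:  aws.Int64(8080),
		}},
	}
	fmt.Println(input)
}
```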

", "CreateTaskSetRequest$loadBalancers": "

A load balancer object representing the load balancer to use with the task set. The supported load balancer types are either an Application Load Balancer or a Network Load Balancer.

", "Service$loadBalancers": "

A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer.

", "TaskSet$loadBalancers": "

Details on a load balancer that is used with a task set.

" } }, "LogConfiguration": { - "base": "

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run . By default, containers use the same logging driver that the Docker daemon uses; however the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

The following should be noted when specifying a log configuration for your containers:

", + "base": "

The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run.

By default, containers use the same logging driver that the Docker daemon uses; however, the container may use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

The following should be noted when specifying a log configuration for your containers:

", "refs": { - "ContainerDefinition$logConfiguration": "

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "ContainerDefinition$logConfiguration": "

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" } }, "LogConfigurationOptionsMap": { @@ -1120,7 +1132,7 @@ "LogDriver": { "base": null, "refs": { - "LogConfiguration$logDriver": "

The log driver to use for the container. The valid values listed earlier are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks using the EC2 launch type, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we do not currently provide support for running modified copies of this software.

" + "LogConfiguration$logDriver": "

The log driver to use for the container.

For tasks on AWS Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs log driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we do not currently provide support for running modified copies of this software.
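For a concrete picture of the log configuration shape, here is a hedged aws-sdk-go sketch using the awslogs driver; the log group, region, and stream prefix are placeholders, and the option keys are the ones commonly used with awslogs.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Send container logs to CloudWatch Logs via the awslogs driver
	// (all option values are placeholders).
	logCfg := &ecs.LogConfiguration{
		LogDriver: aws.String("awslogs"),
		Options: map[string]*string{
			"awslogs-group":         aws.String("/ecs/example-web"),
			"awslogs-region":        aws.String("us-east-1"),
			"awslogs-stream-prefix": aws.String("web"),
		},
	}
	fmt.Println(logCfg)
}
```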

" } }, "Long": { @@ -1146,8 +1158,8 @@ "ManagedScalingStepSize": { "base": null, "refs": { - "ManagedScaling$minimumScalingStepSize": "

The minimum number of container instances that Amazon ECS will scale in or scale out at one time. If this parameter is omitted, the default value of 1 is used.

", - "ManagedScaling$maximumScalingStepSize": "

The maximum number of container instances that Amazon ECS will scale in or scale out at one time. If this parameter is omitted, the default value of 10000 is used.

" + "ManagedScaling$minimumScalingStepSize": "

The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 1 is used.

When additional capacity is required, Amazon ECS will scale out by at least the minimum scaling step size, even if the actual demand is less than the minimum scaling step size.

If you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale out by exactly the minimum scaling step size value and will ignore both the maximum scaling step size and the capacity demand.

", + "ManagedScaling$maximumScalingStepSize": "

The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 10000 is used.
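These step-size parameters are set on the managed scaling block of a capacity provider; the hedged aws-sdk-go sketch below uses placeholder values and an assumed Auto Scaling group ARN.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// A capacity provider that scales the Auto Scaling group out by at least 1
	// and at most 100 instances per scaling action, targeting 90% utilization.
	input := &ecs.CreateCapacityProviderInput{
		Name: aws.String("example-cp"), // placeholder
		AutoScalingGroupProvider: &ecs.AutoScalingGroupProvider{
			AutoScalingGroupArn: aws.String("arn:aws:autoscaling:us-east-1:111122223333:autoScalingGroup:uuid:autoScalingGroupName/example"), // placeholder
			ManagedScaling: &ecs.ManagedScaling{
				Status:                 aws.String("ENABLED"),
				TargetCapacity:         aws.Int64(90),
				MinimumScalingStepSize: aws.Int64(1),
				MaximumScalingStepSize: aws.Int64(100),
			},
		},
	}
	fmt.Println(input)
}
```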

" } }, "ManagedScalingTargetCapacity": { @@ -1176,7 +1188,7 @@ "MountPointList": { "base": null, "refs": { - "ContainerDefinition$mountPoints": "

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives.

" + "ContainerDefinition$mountPoints": "

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers cannot mount directories on a different drive, and mount point cannot be across drives.

" } }, "NetworkBinding": { @@ -1316,7 +1328,7 @@ "PortMappingList": { "base": null, "refs": { - "ContainerDefinition$portMappings": "

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section DescribeTasks responses.

" + "ContainerDefinition$portMappings": "

The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.

For task definitions that use the awsvpc network mode, you should only specify the containerPort. The hostPort can be left blank or it must be the same value as the containerPort.

Port mappings on Windows use the NetNAT gateway address rather than localhost. There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.

This parameter maps to PortBindings in the Create a container section of the Docker Remote API and the --publish option to docker run. If the network mode of a task definition is set to none, then you can't specify port mappings. If the network mode of a task definition is set to host, then host ports must either be undefined or they must match the container port in the port mapping.

After a task reaches the RUNNING status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings section DescribeTasks responses.

" } }, "PropagateTags": { @@ -1653,8 +1665,8 @@ "Attachment$status": "

The status of the attachment. Valid values are PRECREATED, CREATED, ATTACHING, ATTACHED, DETACHING, DETACHED, and DELETED.

", "AttachmentStateChange$attachmentArn": "

The Amazon Resource Name (ARN) of the attachment.

", "AttachmentStateChange$status": "

The status of the attachment.

", - "Attribute$name": "

The name of the attribute. Up to 128 letters (uppercase and lowercase), numbers, hyphens, underscores, and periods are allowed.

", - "Attribute$value": "

The value of the attribute. Up to 128 letters (uppercase and lowercase), numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons, and spaces are allowed.

", + "Attribute$name": "

The name of the attribute. The name must contain between 1 and 128 characters and may contain letters (uppercase and lowercase), numbers, hyphens, underscores, forward slashes, back slashes, or periods.

", + "Attribute$value": "

The value of the attribute. The value must contain between 1 and 128 characters and may contain letters (uppercase and lowercase), numbers, hyphens, underscores, periods, at signs (@), forward slashes, back slashes, colons, or spaces. The value cannot contain any leading or trailing whitespace.

", "Attribute$targetId": "

The ID of the target. You can specify the short form ID for a resource or the full Amazon Resource Name (ARN).

", "AutoScalingGroupProvider$autoScalingGroupArn": "

The Amazon Resource Name (ARN) that identifies the Auto Scaling group.

", "CapacityProvider$capacityProviderArn": "

The Amazon Resource Name (ARN) that identifies the capacity provider.

", @@ -1678,11 +1690,11 @@ "Container$cpu": "

The number of CPU units set for the container. The value will be 0 if no value was specified in the container definition when the task definition was registered.

", "Container$memory": "

The hard limit (in MiB) of memory set for the container.

", "Container$memoryReservation": "

The soft limit (in MiB) of memory set for the container.

", - "ContainerDefinition$name": "

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

", - "ContainerDefinition$image": "

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

", - "ContainerDefinition$hostname": "

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if you are using the awsvpc network mode.

", - "ContainerDefinition$user": "

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

You can use the following formats. If specifying a UID or GID, you must specify it as a positive integer.

This parameter is not supported for Windows containers.

", - "ContainerDefinition$workingDirectory": "

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

", + "ContainerDefinition$name": "

The name of a container. If you are linking multiple containers together in a task definition, the name of one container can be entered in the links of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. This parameter maps to name in the Create a container section of the Docker Remote API and the --name option to docker run.

", + "ContainerDefinition$image": "

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

", + "ContainerDefinition$hostname": "

The hostname to use for your container. This parameter maps to Hostname in the Create a container section of the Docker Remote API and the --hostname option to docker run.

The hostname parameter is not supported if you are using the awsvpc network mode.

", + "ContainerDefinition$user": "

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

You can use the following formats. If specifying a UID or GID, you must specify it as a positive integer.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

", + "ContainerDefinition$workingDirectory": "

The working directory in which to run commands inside the container. This parameter maps to WorkingDir in the Create a container section of the Docker Remote API and the --workdir option to docker run.

", "ContainerDependency$containerName": "

The name of a container.

", "ContainerInstance$containerInstanceArn": "

The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID.

", "ContainerInstance$ec2InstanceId": "

The EC2 instance ID of the container instance.

", @@ -1699,7 +1711,7 @@ "CreateClusterRequest$clusterName": "

The name of your cluster. If you do not specify a name for your cluster, you create a cluster named default. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed.

", "CreateServiceRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not specify a cluster, the default cluster is assumed.

", "CreateServiceRequest$serviceName": "

The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a Region or across multiple Regions.

", - "CreateServiceRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.

A task definition must be specified if the service is using the ECS deployment controller.

", + "CreateServiceRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.

A task definition must be specified if the service is using either the ECS or CODE_DEPLOY deployment controllers.

", "CreateServiceRequest$clientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.

", "CreateServiceRequest$platformVersion": "

The platform version that your tasks in the service are running on. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

", "CreateServiceRequest$role": "

The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition does not use the awsvpc network mode. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.

If your account has already created the Amazon ECS service-linked role, that role is used by default for your service unless you specify a role here. The service-linked role is required if your task definition uses the awsvpc network mode or if the service is configured to use service discovery, an external deployment controller, multiple target groups, or Elastic Inference accelerators in which case you should not specify a role here. For more information, see Using Service-Linked Roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide.

", @@ -1742,10 +1754,14 @@ "DockerLabelsMap$key": null, "DockerLabelsMap$value": null, "DockerVolumeConfiguration$driver": "

The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see Docker plugin discovery. This parameter maps to Driver in the Create a volume section of the Docker Remote API and the --driver option to docker volume create.

", - "EFSAuthorizationConfig$accessPointId": "

The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the EFSVolumeConfiguration will be relative to the directory set for the access point. If an access point is used, transit encryption must be enabled in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS Access Points in the Amazon Elastic File System User Guide.

", + "EFSAuthorizationConfig$accessPointId": "

The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the EFSVolumeConfiguration must either be omitted or set to / which will enforce the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS Access Points in the Amazon Elastic File System User Guide.

", "EFSVolumeConfiguration$fileSystemId": "

The Amazon EFS file system ID to use.

", - "EFSVolumeConfiguration$rootDirectory": "

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter.

", + "EFSVolumeConfiguration$rootDirectory": "

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume will be used. Specifying / will have the same effect as omitting this parameter.

If an EFS access point is specified in the authorizationConfig, the root directory parameter must either be omitted or set to / which will enforce the path set on the EFS access point.

", "EnvironmentFile$value": "

The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file.

", + "FSxWindowsFileServerAuthorizationConfig$credentialsParameter": "

The authorization credential option to use.

The authorization credential options can be provided using either the AWS Secrets Manager ARN or the AWS Systems Manager ARN. The ARNs refer to the stored credentials.

options:

", + "FSxWindowsFileServerAuthorizationConfig$domain": "

A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or a self-hosted Active Directory on Amazon EC2.

", + "FSxWindowsFileServerVolumeConfiguration$fileSystemId": "

The Amazon FSx for Windows File Server file system ID to use.

", + "FSxWindowsFileServerVolumeConfiguration$rootDirectory": "

The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host.

", "Failure$arn": "

The Amazon Resource Name (ARN) of the failed resource.

", "Failure$reason": "

The reason for the failure.

", "Failure$detail": "

The details of the failure.

", @@ -1896,7 +1912,7 @@ "TaskDefinition$taskRoleArn": "

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

", "TaskDefinition$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.

", "TaskDefinition$cpu": "

The number of cpu units used by the task. If you are using the EC2 launch type, this field is optional and any value can be used. If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

", - "TaskDefinition$memory": "

The amount (in MiB) of memory used by the task.

If using the EC2 launch type, this field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

", + "TaskDefinition$memory": "

The amount (in MiB) of memory used by the task.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see ContainerDefinition.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

", "TaskDefinitionPlacementConstraint$expression": "

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", "TaskOverride$cpu": "

The cpu override for the task.

", "TaskOverride$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution IAM role override for the task.

", @@ -1937,15 +1953,15 @@ "StringList": { "base": null, "refs": { - "AwsVpcConfiguration$subnets": "

The subnets associated with the task or service. There is a limit of 16 subnets that can be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

", - "AwsVpcConfiguration$securityGroups": "

The security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. There is a limit of 5 security groups that can be specified per AwsVpcConfiguration.

All specified security groups must be from the same VPC.

", + "AwsVpcConfiguration$subnets": "

The IDs of the subnets associated with the task or service. There is a limit of 16 subnets that can be specified per AwsVpcConfiguration.

All specified subnets must be from the same VPC.

", + "AwsVpcConfiguration$securityGroups": "

The IDs of the security groups associated with the task or service. If you do not specify a security group, the default security group for the VPC is used. There is a limit of 5 security groups that can be specified per AwsVpcConfiguration.

All specified security groups must be from the same VPC.

", "Cluster$capacityProviders": "

The capacity providers associated with the cluster.

", - "ContainerDefinition$links": "

The links parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. For more information about linking Docker containers, go to Legacy container links in the Docker documentation. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run.

This parameter is not supported for Windows containers.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

", - "ContainerDefinition$entryPoint": "

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

", - "ContainerDefinition$command": "

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each argument should be a separated string in the array.

", - "ContainerDefinition$dnsServers": "

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers.

", - "ContainerDefinition$dnsSearchDomains": "

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers.

", - "ContainerDefinition$dockerSecurityOptions": "

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

With Windows containers, this parameter can be used to reference a credential spec file when configuring a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers in the Amazon Elastic Container Service Developer Guide.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

", + "ContainerDefinition$links": "

The links parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge. The name:internalName construct is analogous to name:alias in Docker links. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. For more information about linking Docker containers, go to Legacy container links in the Docker documentation. This parameter maps to Links in the Create a container section of the Docker Remote API and the --link option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.

", + "ContainerDefinition$entryPoint": "

Early versions of the Amazon ECS container agent do not properly handle entryPoint parameters. If you have problems using entryPoint, update your container agent or enter your commands and arguments as command array items instead.

The entry point that is passed to the container. This parameter maps to Entrypoint in the Create a container section of the Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.

", + "ContainerDefinition$command": "

The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each argument should be a separate string in the array.

", + "ContainerDefinition$dnsServers": "

A list of DNS servers that are presented to the container. This parameter maps to Dns in the Create a container section of the Docker Remote API and the --dns option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

", + "ContainerDefinition$dnsSearchDomains": "

A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch in the Create a container section of the Docker Remote API and the --dns-search option to docker run.

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

", + "ContainerDefinition$dockerSecurityOptions": "

A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems. This field is not valid for containers in tasks using the Fargate launch type.

With Windows containers, this parameter can be used to reference a credential spec file when configuring a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers in the Amazon Elastic Container Service Developer Guide.

This parameter maps to SecurityOpt in the Create a container section of the Docker Remote API and the --security-opt option to docker run.

The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

For more information about valid values, see Docker Run Security Configuration.

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"

", "ContainerOverride$command": "

The command to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name.

", "CreateClusterRequest$capacityProviders": "

The short name of one or more capacity providers to associate with the cluster.

If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created and not already associated with another cluster. New capacity providers can be created with the CreateCapacityProvider API operation.

To use an AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

", "DescribeCapacityProvidersRequest$capacityProviders": "

The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to 100 capacity providers can be described in an action.

", @@ -1955,8 +1971,8 @@ "DescribeTaskSetsRequest$taskSets": "

The ID or full Amazon Resource Name (ARN) of task sets to describe.

", "DescribeTasksRequest$tasks": "

A list of up to 100 task IDs or full ARN entries.

", "HealthCheck$command": "

A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD to execute the command arguments directly, or CMD-SHELL to run the command with the container's default shell. For example:

[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]

An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck in the Create a container section of the Docker Remote API.

", - "KernelCapabilities$add": "

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

The SYS_PTRACE capability is supported for tasks that use the Fargate launch type if they are also using platform version 1.4.0. The other capabilities are not supported for any platform versions.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

", - "KernelCapabilities$drop": "

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

", + "KernelCapabilities$add": "

The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd in the Create a container section of the Docker Remote API and the --cap-add option to docker run.

Tasks launched on AWS Fargate only support adding the SYS_PTRACE kernel capability.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

", + "KernelCapabilities$drop": "

The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop in the Create a container section of the Docker Remote API and the --cap-drop option to docker run.

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"

", "ListClustersResponse$clusterArns": "

The list of full Amazon Resource Name (ARN) entries for each cluster associated with your account.

", "ListContainerInstancesResponse$containerInstanceArns": "

The list of container instances with full ARN entries for each container instance associated with the specified cluster.

", "ListServicesResponse$serviceArns": "

The list of full ARN entries for each service associated with the specified cluster.

", @@ -2008,7 +2024,7 @@ } }, "SystemControl": { - "base": "

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode for the following reasons:

", + "base": "

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode for the following reasons:

", "refs": { "SystemControls$member": null } @@ -2016,7 +2032,7 @@ "SystemControls": { "base": null, "refs": { - "ContainerDefinition$systemControls": "

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. For tasks that use the awsvpc network mode, the container that is started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers.

" + "ContainerDefinition$systemControls": "

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

It is not recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. For tasks that use the awsvpc network mode, the container that is started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers.

" } }, "Tag": { @@ -2249,7 +2265,7 @@ "TmpfsList": { "base": null, "refs": { - "LinuxParameters$tmpfs": "

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you are using tasks that use the Fargate launch type, the tmpfs parameter is not supported.

" + "LinuxParameters$tmpfs": "

The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs option to docker run.

If you are using tasks that use the Fargate launch type, the tmpfs parameter is not supported.

" } }, "TransportProtocol": { @@ -2268,7 +2284,7 @@ "UlimitList": { "base": null, "refs": { - "ContainerDefinition$ulimits": "

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it will override the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" + "ContainerDefinition$ulimits": "

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it will override the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.

" } }, "UlimitName": { @@ -2365,7 +2381,7 @@ } }, "Volume": { - "base": "

A data volume used in a task definition. For tasks that use Amazon Elastic File System (Amazon EFS) file storage, specify an efsVolumeConfiguration. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

", + "base": "

A data volume used in a task definition. For tasks that use the Amazon Elastic File System (Amazon EFS), specify an efsVolumeConfiguration. For Windows tasks that use an Amazon FSx for Windows File Server file system, specify a fsxWindowsFileServerVolumeConfiguration. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

", "refs": { "VolumeList$member": null } @@ -2379,7 +2395,7 @@ "VolumeFromList": { "base": null, "refs": { - "ContainerDefinition$volumesFrom": "

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" + "ContainerDefinition$volumesFrom": "

Data volumes to mount from another container. This parameter maps to VolumesFrom in the Create a container section of the Docker Remote API and the --volumes-from option to docker run.

" } }, "VolumeList": { diff --git a/models/apis/es/2015-01-01/api-2.json b/models/apis/es/2015-01-01/api-2.json index 9390b6f2dc8..d32297699cc 100644 --- a/models/apis/es/2015-01-01/api-2.json +++ b/models/apis/es/2015-01-01/api-2.json @@ -357,6 +357,22 @@ {"shape":"InternalException"} ] }, + "GetPackageVersionHistory":{ + "name":"GetPackageVersionHistory", + "http":{ + "method":"GET", + "requestUri":"/2015-01-01/packages/{PackageID}/history" + }, + "input":{"shape":"GetPackageVersionHistoryRequest"}, + "output":{"shape":"GetPackageVersionHistoryResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, "GetUpgradeHistory":{ "name":"GetUpgradeHistory", "http":{ @@ -553,6 +569,23 @@ {"shape":"ValidationException"} ] }, + "UpdatePackage":{ + "name":"UpdatePackage", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/packages/update" + }, + "input":{"shape":"UpdatePackageRequest"}, + "output":{"shape":"UpdatePackageResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ] + }, "UpgradeElasticsearchDomain":{ "name":"UpgradeElasticsearchDomain", "http":{ @@ -746,6 +779,10 @@ "Status":{"shape":"OptionStatus"} } }, + "CommitMessage":{ + "type":"string", + "max":160 + }, "CompatibleElasticsearchVersionsList":{ "type":"list", "member":{"shape":"CompatibleVersionsMap"} @@ -1227,6 +1264,7 @@ "LastUpdated":{"shape":"LastUpdated"}, "DomainName":{"shape":"DomainName"}, "DomainPackageStatus":{"shape":"DomainPackageStatus"}, + "PackageVersion":{"shape":"PackageVersion"}, "ReferencePath":{"shape":"ReferencePath"}, "ErrorDetails":{"shape":"ErrorDetails"} } @@ -1501,6 +1539,35 @@ "CompatibleElasticsearchVersions":{"shape":"CompatibleElasticsearchVersionsList"} } }, + "GetPackageVersionHistoryRequest":{ + "type":"structure", + "required":["PackageID"], + "members":{ + "PackageID":{ + "shape":"PackageID", + "location":"uri", + "locationName":"PackageID" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "GetPackageVersionHistoryResponse":{ + "type":"structure", + "members":{ + "PackageID":{"shape":"PackageID"}, + "PackageVersionHistoryList":{"shape":"PackageVersionHistoryList"}, + "NextToken":{"shape":"String"} + } + }, "GetUpgradeHistoryRequest":{ "type":"structure", "required":["DomainName"], @@ -1936,6 +2003,8 @@ "PackageDescription":{"shape":"PackageDescription"}, "PackageStatus":{"shape":"PackageStatus"}, "CreatedAt":{"shape":"CreatedAt"}, + "LastUpdatedAt":{"shape":"LastUpdated"}, + "AvailablePackageVersion":{"shape":"PackageVersion"}, "ErrorDetails":{"shape":"ErrorDetails"} } }, @@ -1974,6 +2043,19 @@ "type":"string", "enum":["TXT-DICTIONARY"] }, + "PackageVersion":{"type":"string"}, + "PackageVersionHistory":{ + "type":"structure", + "members":{ + "PackageVersion":{"shape":"PackageVersion"}, + "CommitMessage":{"shape":"CommitMessage"}, + "CreatedAt":{"shape":"CreatedAt"} + } + }, + "PackageVersionHistoryList":{ + "type":"list", + "member":{"shape":"PackageVersionHistory"} + }, "Password":{ "type":"string", "min":8, @@ -2299,6 +2381,25 @@ "DomainConfig":{"shape":"ElasticsearchDomainConfig"} } }, + 
"UpdatePackageRequest":{ + "type":"structure", + "required":[ + "PackageID", + "PackageSource" + ], + "members":{ + "PackageID":{"shape":"PackageID"}, + "PackageSource":{"shape":"PackageSource"}, + "PackageDescription":{"shape":"PackageDescription"}, + "CommitMessage":{"shape":"CommitMessage"} + } + }, + "UpdatePackageResponse":{ + "type":"structure", + "members":{ + "PackageDetails":{"shape":"PackageDetails"} + } + }, "UpdateTimestamp":{"type":"timestamp"}, "UpgradeElasticsearchDomainRequest":{ "type":"structure", diff --git a/models/apis/es/2015-01-01/docs-2.json b/models/apis/es/2015-01-01/docs-2.json index 3eafe2b0aba..05a3e48e87b 100644 --- a/models/apis/es/2015-01-01/docs-2.json +++ b/models/apis/es/2015-01-01/docs-2.json @@ -25,6 +25,7 @@ "DescribeReservedElasticsearchInstances": "

Returns information about reserved Elasticsearch instances for this account.

", "DissociatePackage": "

Dissociates a package from the Amazon ES domain.

", "GetCompatibleElasticsearchVersions": "

Returns a list of upgrade compatible Elasticsearch versions. You can optionally pass a DomainName to get all upgrade compatible Elasticsearch versions for that specific domain.

", + "GetPackageVersionHistory": "

Returns a list of versions of the package, along with their creation time and commit message.

", "GetUpgradeHistory": "

Retrieves the complete history of the last 10 upgrades that were performed on the domain.

", "GetUpgradeStatus": "

Retrieves the latest status of the last upgrade or upgrade eligibility check that was performed on the domain.

", "ListDomainNames": "

Returns the name of all Elasticsearch domains owned by the current user's account.

", @@ -38,6 +39,7 @@ "RemoveTags": "

Removes the specified set of tags from the specified Elasticsearch domain.

", "StartElasticsearchServiceSoftwareUpdate": "

Schedules a service software update for an Amazon ES domain.

", "UpdateElasticsearchDomainConfig": "

Modifies the cluster configuration of the specified Elasticsearch domain, such as setting the instance type and the number of instances.

", + "UpdatePackage": "

Updates a package for use with Amazon ES domains.

", "UpgradeElasticsearchDomain": "

Allows you to either upgrade your domain or perform an Upgrade eligibility check to a compatible Elasticsearch version.

" }, "shapes": { @@ -208,6 +210,13 @@ "ElasticsearchDomainConfig$CognitoOptions": "

The CognitoOptions for the specified domain. For more information, see Amazon Cognito Authentication for Kibana.

" } }, + "CommitMessage": { + "base": null, + "refs": { + "PackageVersionHistory$CommitMessage": "

A message associated with the version.

", + "UpdatePackageRequest$CommitMessage": "

An info message for the new version which will be shown as part of GetPackageVersionHistoryResponse.

" + } + }, "CompatibleElasticsearchVersionsList": { "base": null, "refs": { @@ -266,7 +275,8 @@ "CreatedAt": { "base": null, "refs": { - "PackageDetails$CreatedAt": "

Timestamp which tells creation date of the package.

" + "PackageDetails$CreatedAt": "

Timestamp that indicates when the package was created.

", + "PackageVersionHistory$CreatedAt": "

Timestamp that indicates when the package version was created.

" } }, "CrossClusterSearchConnectionId": { @@ -768,6 +778,16 @@ "refs": { } }, + "GetPackageVersionHistoryRequest": { + "base": "

Container for request parameters to GetPackageVersionHistory operation.

", + "refs": { + } + }, + "GetPackageVersionHistoryResponse": { + "base": "

Container for response returned by GetPackageVersionHistory operation.

", + "refs": { + } + }, "GetUpgradeHistoryRequest": { "base": "

Container for request parameters to GetUpgradeHistory operation.

", "refs": { @@ -903,7 +923,8 @@ "LastUpdated": { "base": null, "refs": { - "DomainPackageDetails$LastUpdated": "

Timestamp of the most-recent update to the association status.

" + "DomainPackageDetails$LastUpdated": "

Timestamp of the most-recent update to the association status.

", + "PackageDetails$LastUpdatedAt": null } }, "LimitExceededException": { @@ -1039,6 +1060,7 @@ "DescribePackagesRequest$MaxResults": "

Limits results to a maximum number of packages.

", "DescribeReservedElasticsearchInstanceOfferingsRequest$MaxResults": "

Set this value to limit the number of results returned. If not specified, defaults to 100.

", "DescribeReservedElasticsearchInstancesRequest$MaxResults": "

Set this value to limit the number of results returned. If not specified, defaults to 100.

", + "GetPackageVersionHistoryRequest$MaxResults": "

Limits results to a maximum number of versions.

", "GetUpgradeHistoryRequest$MaxResults": null, "ListDomainsForPackageRequest$MaxResults": "

Limits results to a maximum number of domains.

", "ListElasticsearchInstanceTypesRequest$MaxResults": "

Set this value to limit the number of results returned. The value provided must be greater than 30, or it won't be honored.

", @@ -1069,6 +1091,7 @@ "DescribeReservedElasticsearchInstanceOfferingsRequest$NextToken": "

NextToken should be sent if an earlier API call produced a result containing NextToken. It is used for pagination.

", "DescribeReservedElasticsearchInstanceOfferingsResponse$NextToken": "

Provides an identifier to allow retrieval of paginated results.

", "DescribeReservedElasticsearchInstancesRequest$NextToken": "

NextToken should be sent if an earlier API call produced a result containing NextToken. It is used for pagination.

", + "GetPackageVersionHistoryRequest$NextToken": "

Used for pagination. Only necessary if a previous API call includes a non-null NextToken value. If provided, returns results for the next page.

", "GetUpgradeHistoryRequest$NextToken": null, "ListDomainsForPackageRequest$NextToken": "

Used for pagination. Only necessary if a previous API call includes a non-null NextToken value. If provided, returns results for the next page.

", "ListElasticsearchInstanceTypesRequest$NextToken": "

NextToken should be sent if an earlier API call produced a result containing NextToken. It is used for pagination.

", @@ -1159,7 +1182,8 @@ "base": null, "refs": { "CreatePackageRequest$PackageDescription": "

Description of the package.

", - "PackageDetails$PackageDescription": "

User-specified description of the package.

" + "PackageDetails$PackageDescription": "

User-specified description of the package.

", + "UpdatePackageRequest$PackageDescription": "

New description of the package.

" } }, "PackageDetails": { @@ -1167,7 +1191,8 @@ "refs": { "CreatePackageResponse$PackageDetails": "

Information about the package PackageDetails.

", "DeletePackageResponse$PackageDetails": "

PackageDetails

", - "PackageDetailsList$member": null + "PackageDetailsList$member": null, + "UpdatePackageResponse$PackageDetails": "

Information about the package PackageDetails.

" } }, "PackageDetailsList": { @@ -1183,8 +1208,11 @@ "DeletePackageRequest$PackageID": "

Internal ID of the package that you want to delete. Use DescribePackages to find this value.

", "DissociatePackageRequest$PackageID": "

Internal ID of the package that you want to dissociate from a domain. Use DescribePackages to find this value.

", "DomainPackageDetails$PackageID": "

Internal ID of the package.

", + "GetPackageVersionHistoryRequest$PackageID": "

Internal ID of the package for which to return an audit history of versions. Use DescribePackages to find this value.

", + "GetPackageVersionHistoryResponse$PackageID": null, "ListDomainsForPackageRequest$PackageID": "

The package for which to list domains.

", - "PackageDetails$PackageID": "

Internal ID of the package.

" + "PackageDetails$PackageID": "

Internal ID of the package.

", + "UpdatePackageRequest$PackageID": "

Unique identifier for the package.

" } }, "PackageName": { @@ -1198,7 +1226,8 @@ "PackageSource": { "base": "

The S3 location for importing the package specified as S3BucketName and S3Key

", "refs": { - "CreatePackageRequest$PackageSource": "

The customer S3 location PackageSource for importing the package.

" + "CreatePackageRequest$PackageSource": "

The customer S3 location PackageSource for importing the package.

", + "UpdatePackageRequest$PackageSource": null } }, "PackageStatus": { @@ -1215,6 +1244,26 @@ "PackageDetails$PackageType": "

Currently supports only TXT-DICTIONARY.

" } }, + "PackageVersion": { + "base": null, + "refs": { + "DomainPackageDetails$PackageVersion": null, + "PackageDetails$AvailablePackageVersion": null, + "PackageVersionHistory$PackageVersion": "

Version of the package.

" + } + }, + "PackageVersionHistory": { + "base": "

Details of a package version.

", + "refs": { + "PackageVersionHistoryList$member": null + } + }, + "PackageVersionHistoryList": { + "base": null, + "refs": { + "GetPackageVersionHistoryResponse$PackageVersionHistoryList": "

List of PackageVersionHistory objects.

" + } + }, "Password": { "base": null, "refs": { @@ -1468,6 +1517,7 @@ "DescribePackagesResponse$NextToken": null, "DescribeReservedElasticsearchInstancesResponse$NextToken": "

Provides an identifier to allow retrieval of paginated results.

", "EndpointsMap$key": null, + "GetPackageVersionHistoryResponse$NextToken": null, "GetUpgradeHistoryResponse$NextToken": "

Pagination token that needs to be supplied to the next call to get the next page of results

", "ListDomainsForPackageResponse$NextToken": null, "ListPackagesForDomainResponse$NextToken": "

Pagination token that needs to be supplied to the next call to get the next page of results.

", @@ -1545,6 +1595,16 @@ "refs": { } }, + "UpdatePackageRequest": { + "base": "

Container for request parameters to UpdatePackage operation.

", + "refs": { + } + }, + "UpdatePackageResponse": { + "base": "

Container for response returned by UpdatePackage operation.

", + "refs": { + } + }, "UpdateTimestamp": { "base": null, "refs": { diff --git a/models/apis/es/2015-01-01/paginators-1.json b/models/apis/es/2015-01-01/paginators-1.json index 6a5c91f1c0a..2ca3f2e67f7 100644 --- a/models/apis/es/2015-01-01/paginators-1.json +++ b/models/apis/es/2015-01-01/paginators-1.json @@ -25,6 +25,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "GetPackageVersionHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "GetUpgradeHistory": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/fsx/2018-03-01/api-2.json b/models/apis/fsx/2018-03-01/api-2.json index f10e4a43ab1..d099a292171 100644 --- a/models/apis/fsx/2018-03-01/api-2.json +++ b/models/apis/fsx/2018-03-01/api-2.json @@ -13,6 +13,20 @@ "uid":"fsx-2018-03-01" }, "operations":{ + "AssociateFileSystemAliases":{ + "name":"AssociateFileSystemAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateFileSystemAliasesRequest"}, + "output":{"shape":"AssociateFileSystemAliasesResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"InternalServerError"} + ] + }, "CancelDataRepositoryTask":{ "name":"CancelDataRepositoryTask", "http":{ @@ -174,6 +188,20 @@ {"shape":"InternalServerError"} ] }, + "DescribeFileSystemAliases":{ + "name":"DescribeFileSystemAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFileSystemAliasesRequest"}, + "output":{"shape":"DescribeFileSystemAliasesResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"InternalServerError"} + ] + }, "DescribeFileSystems":{ "name":"DescribeFileSystems", "http":{ @@ -188,6 +216,20 @@ {"shape":"InternalServerError"} ] }, + "DisassociateFileSystemAliases":{ + "name":"DisassociateFileSystemAliases", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateFileSystemAliasesRequest"}, + "output":{"shape":"DisassociateFileSystemAliasesResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileSystemNotFound"}, + {"shape":"InternalServerError"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -317,7 +359,9 @@ "type":"string", "enum":[ "FILE_SYSTEM_UPDATE", - "STORAGE_OPTIMIZATION" + "STORAGE_OPTIMIZATION", + "FILE_SYSTEM_ALIAS_ASSOCIATION", + "FILE_SYSTEM_ALIAS_DISASSOCIATION" ] }, "AdministrativeActions":{ @@ -325,12 +369,66 @@ "member":{"shape":"AdministrativeAction"}, "max":50 }, + "Alias":{ + "type":"structure", + "members":{ + "Name":{"shape":"AlternateDNSName"}, + "Lifecycle":{"shape":"AliasLifecycle"} + } + }, + "AliasLifecycle":{ + "type":"string", + "enum":[ + "AVAILABLE", + "CREATING", + "DELETING", + "CREATE_FAILED", + "DELETE_FAILED" + ] + }, + "Aliases":{ + "type":"list", + "member":{"shape":"Alias"}, + "max":50 + }, + "AlternateDNSName":{ + "type":"string", + "max":253, + "min":4, + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,253}$" + }, + "AlternateDNSNames":{ + "type":"list", + "member":{"shape":"AlternateDNSName"}, + "max":50 + }, "ArchivePath":{ "type":"string", - "max":900, + "max":4357, "min":3, "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$" }, + "AssociateFileSystemAliasesRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "Aliases" + ], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "FileSystemId":{"shape":"FileSystemId"}, + 
"Aliases":{"shape":"AlternateDNSNames"} + } + }, + "AssociateFileSystemAliasesResponse":{ + "type":"structure", + "members":{ + "Aliases":{"shape":"Aliases"} + } + }, "AutoImportPolicyType":{ "type":"string", "enum":[ @@ -420,7 +518,8 @@ "type":"string", "enum":[ "AUTOMATIC", - "USER_INITIATED" + "USER_INITIATED", + "AWS_BACKUP" ] }, "Backups":{ @@ -591,7 +690,8 @@ "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "AutomaticBackupRetentionDays":{"shape":"AutomaticBackupRetentionDays"}, - "CopyTagsToBackups":{"shape":"Flag"} + "CopyTagsToBackups":{"shape":"Flag"}, + "Aliases":{"shape":"AlternateDNSNames"} } }, "CreationTime":{"type":"timestamp"}, @@ -856,6 +956,26 @@ "NextToken":{"shape":"NextToken"} } }, + "DescribeFileSystemAliasesRequest":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "FileSystemId":{"shape":"FileSystemId"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeFileSystemAliasesResponse":{ + "type":"structure", + "members":{ + "Aliases":{"shape":"Aliases"}, + "NextToken":{"shape":"NextToken"} + } + }, "DescribeFileSystemsRequest":{ "type":"structure", "members":{ @@ -890,6 +1010,27 @@ "min":1, "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$" }, + "DisassociateFileSystemAliasesRequest":{ + "type":"structure", + "required":[ + "FileSystemId", + "Aliases" + ], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "FileSystemId":{"shape":"FileSystemId"}, + "Aliases":{"shape":"AlternateDNSNames"} + } + }, + "DisassociateFileSystemAliasesResponse":{ + "type":"structure", + "members":{ + "Aliases":{"shape":"Aliases"} + } + }, "DnsIps":{ "type":"list", "member":{"shape":"IpAddress"}, @@ -1493,7 +1634,8 @@ "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "AutomaticBackupRetentionDays":{"shape":"AutomaticBackupRetentionDays"}, - "CopyTagsToBackups":{"shape":"Flag"} + "CopyTagsToBackups":{"shape":"Flag"}, + "Aliases":{"shape":"Aliases"} } } } diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index 1219652674d..c587190316d 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -2,6 +2,7 @@ "version": "2.0", "service": "

Amazon FSx is a fully managed service that makes it easy for storage and application administrators to launch and use shared file storage.

", "operations": { + "AssociateFileSystemAliases": "

Use this action to associate one or more Domain Name Server (DNS) aliases with an existing Amazon FSx for Windows File Server file system. A file system can have a maximum of 50 DNS aliases associated with it at any one time. If you try to associate a DNS alias that is already associated with the file system, FSx takes no action on that alias in the request. For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.

The system response shows the DNS aliases that Amazon FSx is attempting to associate with the file system. Use the API operation to monitor the status of the aliases Amazon FSx is associating with the file system.

", "CancelDataRepositoryTask": "

Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following.

", "CreateBackup": "

Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:

For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Lustre backups.

For more information about backing up Amazon FSx for Windows File Server file systems, see Working with FSx for Windows backups.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup with the specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", "CreateDataRepositoryTask": "

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Using Data Repository Tasks. To learn more about linking a data repository to your file system, see Setting the Export Prefix.

", @@ -11,7 +12,9 @@ "DeleteFileSystem": "

Deletes a file system, deleting its contents. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups will also be deleted.

By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup is not subject to the file system's retention policy, and must be manually deleted.

The DeleteFileSystem action returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems action, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems returns a FileSystemNotFound error.

Deleting an Amazon FSx for Lustre file system will fail with a 400 BadRequest if a data repository task is in a PENDING or EXECUTING state.

The data in a deleted file system is also deleted and can't be recovered by any means.

", "DescribeBackups": "

Returns the description of specific Amazon FSx backups, if a BackupIds value is provided for that backup. Otherwise, it returns all backups owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all backups, you can optionally specify the MaxResults parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your backups. DescribeBackups is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this action, keep the following in mind:

", "DescribeDataRepositoryTasks": "

Returns the description of specific Amazon FSx for Lustre data repository tasks, if one or more TaskIds values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all tasks, you can paginate the response by using the optional MaxResults parameter to limit the number of tasks returned in a response. If more tasks remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

", + "DescribeFileSystemAliases": "

Returns the DNS aliases that are associated with the specified Amazon FSx for Windows File Server file system. A history of all DNS aliases that have been associated with and disassociated from the file system is available in the list of AdministrativeAction provided in the DescribeFileSystems operation response.

", "DescribeFileSystems": "

Returns the description of specific Amazon FSx file systems, if a FileSystemIds value is provided for that file system. Otherwise, it returns descriptions of all file systems owned by your AWS account in the AWS Region of the endpoint that you're calling.

When retrieving all file system descriptions, you can optionally specify the MaxResults parameter to limit the number of descriptions in a response. If more file system descriptions remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your file system descriptions. DescribeFileSystems is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this action, keep the following in mind:

", + "DisassociateFileSystemAliases": "

Use this action to disassociate, or remove, one or more Domain Name Service (DNS) aliases from an Amazon FSx for Windows File Server file system. If you attempt to disassociate a DNS alias that is not associated with the file system, Amazon FSx responds with a 400 Bad Request. For more information, see Working with DNS Aliases.

The system response shows the DNS aliases that Amazon FSx is attempting to disassociate from the file system. Use the API operation to monitor the status of the aliases Amazon FSx is disassociating from the file system.

", "ListTagsForResource": "

Lists tags for Amazon FSx file systems and backups in the case of Amazon FSx for Windows File Server.

When retrieving all tags, you can optionally specify the MaxResults parameter to limit the number of tags in a response. If more tags remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your tags. ListTagsForResource is called first without a NextToken value. Then the action continues to be called with the NextToken parameter set to the NextToken value from the last response until a response has no NextToken.

When using this action, keep the following in mind:

", "TagResource": "

Tags an Amazon FSx resource.

", "UntagResource": "

This action removes a tag from an Amazon FSx resource.

", @@ -62,7 +65,7 @@ } }, "AdministrativeActionType": { - "base": "

Describes the type of administrative action, as follows:

", + "base": "

Describes the type of administrative action, as follows:

", "refs": { "AdministrativeAction$AdministrativeActionType": null } @@ -73,6 +76,42 @@ "FileSystem$AdministrativeActions": "

A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Windows file system that you have initiated using the UpdateFileSystem action.

" } }, + "Alias": { + "base": "

A DNS alias that is associated with the file system. You can use a DNS alias to access a file system using user-defined DNS names, in addition to the default DNS name that Amazon FSx assigns to the file system. For more information, see DNS aliases in the FSx for Windows File Server User Guide.

", + "refs": { + "Aliases$member": null + } + }, + "AliasLifecycle": { + "base": null, + "refs": { + "Alias$Lifecycle": "

Describes the state of the DNS alias.

" + } + }, + "Aliases": { + "base": "

An array of one or more DNS aliases that are currently associated with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see DNS aliases.

", + "refs": { + "AssociateFileSystemAliasesResponse$Aliases": "

An array of the DNS aliases that Amazon FSx is associating with the file system.

", + "DescribeFileSystemAliasesResponse$Aliases": "

An array of one or more DNS aliases currently associated with the specified file system.

", + "DisassociateFileSystemAliasesResponse$Aliases": "

An array of one or more DNS aliases that Amazon FSx is attempting to disassociate from the file system.

", + "WindowsFileSystemConfiguration$Aliases": null + } + }, + "AlternateDNSName": { + "base": null, + "refs": { + "Alias$Name": "

The name of the DNS alias. The alias name has to meet the following requirements:

For DNS names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

", + "AlternateDNSNames$member": null + } + }, + "AlternateDNSNames": { + "base": null, + "refs": { + "AssociateFileSystemAliasesRequest$Aliases": "

An array of one or more DNS alias names to associate with the file system. The alias name has to comply with the following formatting requirements:

For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

", + "CreateFileSystemWindowsConfiguration$Aliases": "

An array of one or more DNS alias names that you want to associate with the Amazon FSx file system. Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system. You can associate up to 50 aliases with a file system at any time. You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation. You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload.

For more information, see Working with DNS Aliases and Walkthrough 5: Using DNS aliases to access your file system, including additional steps you must take to be able to access your file system using a DNS alias.

An alias name has to meet the following requirements:

For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in escape codes.
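
A hedged Go sketch of passing Aliases at creation time, mirroring the updated CreateFileSystem example elsewhere in this diff (capacities and IDs are illustrative):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	client := fsx.New(session.Must(session.NewSession()))

	_, err := client.CreateFileSystem(&fsx.CreateFileSystemInput{
		FileSystemType:  aws.String("WINDOWS"),
		StorageType:     aws.String("HDD"),
		StorageCapacity: aws.Int64(3200),
		SubnetIds:       []*string{aws.String("subnet-1234abcd")},
		WindowsConfiguration: &fsx.CreateFileSystemWindowsConfiguration{
			ActiveDirectoryId:  aws.String("d-1234abcd12"),
			ThroughputCapacity: aws.Int64(32),
			// Up to 50 user-defined DNS names, in addition to the default DNS name.
			Aliases: []*string{aws.String("accounting.corp.example.com")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```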

", + "DisassociateFileSystemAliasesRequest$Aliases": "

An array of one or more DNS alias names to disassociate, or remove, from the file system.

" + } + }, "ArchivePath": { "base": null, "refs": { @@ -83,6 +122,16 @@ "DataRepositoryConfiguration$ExportPath": "

The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.

" } }, + "AssociateFileSystemAliasesRequest": { + "base": "

The request object specifying one or more DNS alias names to associate with an Amazon FSx for Windows File Server file system.

", + "refs": { + } + }, + "AssociateFileSystemAliasesResponse": { + "base": "

The system generated response showing the DNS aliases that Amazon FSx is attempting to associate with the file system. Use the API operation to monitor the status of the aliases Amazon FSx is associating with the file system. It can take up to 2.5 minutes for the alias status to change from CREATING to AVAILABLE.
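
A minimal sketch of driving that flow from the Go SDK, assuming the generated names for the new operation (the file system ID and alias are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	client := fsx.New(session.Must(session.NewSession()))

	// Associate an additional DNS alias with an existing file system.
	out, err := client.AssociateFileSystemAliases(&fsx.AssociateFileSystemAliasesInput{
		FileSystemId: aws.String("fs-0123456789abcdef0"), // illustrative ID
		Aliases:      []*string{aws.String("finance.corp.example.com")},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Aliases start in CREATING; poll (for example, with DescribeFileSystemAliases)
	// until they reach AVAILABLE, which can take up to 2.5 minutes.
	for _, a := range out.Aliases {
		fmt.Println(aws.StringValue(a.Name), aws.StringValue(a.Lifecycle))
	}
}
```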

", + "refs": { + } + }, "AutoImportPolicyType": { "base": null, "refs": { @@ -185,12 +234,15 @@ "ClientRequestToken": { "base": "

(Optional) An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "refs": { + "AssociateFileSystemAliasesRequest$ClientRequestToken": null, "CreateBackupRequest$ClientRequestToken": "

(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "CreateDataRepositoryTaskRequest$ClientRequestToken": null, "CreateFileSystemFromBackupRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "CreateFileSystemRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

", "DeleteBackupRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the AWS CLI or SDK.

", "DeleteFileSystemRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the AWS CLI or SDK.

", + "DescribeFileSystemAliasesRequest$ClientRequestToken": null, + "DisassociateFileSystemAliasesRequest$ClientRequestToken": null, "UpdateFileSystemRequest$ClientRequestToken": "

A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK.

" } }, @@ -460,6 +512,16 @@ "refs": { } }, + "DescribeFileSystemAliasesRequest": { + "base": "

The request object for DescribeFileSystemAliases operation.

", + "refs": { + } + }, + "DescribeFileSystemAliasesResponse": { + "base": "

The response object for DescribeFileSystemAliases operation.

", + "refs": { + } + }, "DescribeFileSystemsRequest": { "base": "

The request object for DescribeFileSystems operation.

", "refs": { @@ -494,6 +556,16 @@ "SelfManagedActiveDirectoryConfigurationUpdates$UserName": "

The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.

" } }, + "DisassociateFileSystemAliasesRequest": { + "base": "

The request object of DNS aliases to disassociate from an Amazon FSx for Windows File Server file system.

", + "refs": { + } + }, + "DisassociateFileSystemAliasesResponse": { + "base": "

The system generated response showing the DNS aliases that Amazon FSx is attempting to disassociate from the file system. Use the API operation to monitor the status of the aliases Amazon FSx is removing from the file system.

", + "refs": { + } + }, "DnsIps": { "base": null, "refs": { @@ -519,7 +591,7 @@ "base": "

A detailed error message.

", "refs": { "ActiveDirectoryError$Message": null, - "AdministrativeActionFailureDetails$Message": "

Error message providing details about the failure.

", + "AdministrativeActionFailureDetails$Message": "

Error message providing details about the failed administrative action.

", "BackupFailureDetails$Message": "

A message describing the backup creation failure.

", "BackupInProgress$Message": null, "BackupNotFound$Message": null, @@ -555,7 +627,7 @@ "FileSystem": { "base": "

A description of a specific Amazon FSx file system.

", "refs": { - "AdministrativeAction$TargetFileSystemValues": "

Describes the target StorageCapacity or ThroughputCapacity value provided in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative actions.

", + "AdministrativeAction$TargetFileSystemValues": "

Describes the target value for the administration action, provided in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative actions.

", "Backup$FileSystem": "

Metadata of the file system associated with the backup. This metadata is persisted even if the file system is deleted.

", "CreateFileSystemFromBackupResponse$FileSystem": "

A description of the file system.

", "CreateFileSystemResponse$FileSystem": "

The configuration of the file system that was created.

", @@ -579,12 +651,15 @@ "FileSystemId": { "base": "

The globally unique ID of the file system, assigned by Amazon FSx.

", "refs": { + "AssociateFileSystemAliasesRequest$FileSystemId": "

Specifies the file system with which you want to associate one or more DNS aliases.

", "BackupRestoring$FileSystemId": "

The ID of a file system being restored from the backup.

", "CreateBackupRequest$FileSystemId": "

The ID of the file system to back up.

", "CreateDataRepositoryTaskRequest$FileSystemId": null, "DataRepositoryTask$FileSystemId": null, "DeleteFileSystemRequest$FileSystemId": "

The ID of the file system you want to delete.

", "DeleteFileSystemResponse$FileSystemId": "

The ID of the file system being deleted.

", + "DescribeFileSystemAliasesRequest$FileSystemId": "

The ID of the file system to return the associated DNS aliases for (String).

", + "DisassociateFileSystemAliasesRequest$FileSystemId": "

Specifies the file system from which to disassociate the DNS aliases.

", "FileSystem$FileSystemId": "

The system-generated, unique 17-digit ID of the file system.

", "FileSystemIds$member": null, "UpdateFileSystemRequest$FileSystemId": "

Identifies the file system that you are updating.

" @@ -760,6 +835,7 @@ "refs": { "DescribeBackupsRequest$MaxResults": "

Maximum number of backups to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

", "DescribeDataRepositoryTasksRequest$MaxResults": null, + "DescribeFileSystemAliasesRequest$MaxResults": "

Maximum number of DNS aliases to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

", "DescribeFileSystemsRequest$MaxResults": "

Maximum number of file systems to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

", "ListTagsForResourceRequest$MaxResults": "

Maximum number of tags to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.

" } @@ -803,6 +879,8 @@ "DescribeBackupsResponse$NextToken": "

This is present if there are more backups than returned in the response (String). You can use the NextToken value in a later request to fetch the backups.

", "DescribeDataRepositoryTasksRequest$NextToken": null, "DescribeDataRepositoryTasksResponse$NextToken": null, + "DescribeFileSystemAliasesRequest$NextToken": "

Opaque pagination token returned from a previous DescribeFileSystemAliases operation (String). If a token is included in the request, the action continues the list from where the previous call left off.

", + "DescribeFileSystemAliasesResponse$NextToken": "

Present if there are more DNS aliases than returned in the response (String). You can use the NextToken value in a later request to fetch additional descriptions.

", "DescribeFileSystemsRequest$NextToken": "

Opaque pagination token returned from a previous DescribeFileSystems operation (String). If a token is present, the action continues the list from where the previous call left off.

", "DescribeFileSystemsResponse$NextToken": "

Present if there are more file systems than returned in the response (String). You can use the NextToken value in a later request to fetch the descriptions.

", "ListTagsForResourceRequest$NextToken": "

Opaque pagination token returned from a previous ListTagsForResource operation (String). If a token is present, the action continues the list from where the previous call left off.

", @@ -837,7 +915,7 @@ "ProgressPercent": { "base": "

The current percent of progress of an asynchronous task.

", "refs": { - "AdministrativeAction$ProgressPercent": "

Provides the percent complete of a STORAGE_OPTIMIZATION administrative action.

", + "AdministrativeAction$ProgressPercent": "

Provides the percent complete of a STORAGE_OPTIMIZATION administrative action. Does not apply to any other administrative action type.

", "Backup$ProgressPercent": null } }, diff --git a/models/apis/fsx/2018-03-01/examples-1.json b/models/apis/fsx/2018-03-01/examples-1.json index 90ed818094e..1f4d652ff2e 100644 --- a/models/apis/fsx/2018-03-01/examples-1.json +++ b/models/apis/fsx/2018-03-01/examples-1.json @@ -51,11 +51,12 @@ "input": { "ClientRequestToken": "a8ca07e4-61ec-4399-99f4-19853801bcd5", "FileSystemType": "WINDOWS", - "KmsKeyId": "arn:aws:kms:us-east-1:012345678912:key/0ff3ea8d-130e-4133-877f-93908b6fdbd6", + "KmsKeyId": "arn:aws:kms:us-east-1:012345678912:key/1111abcd-2222-3333-4444-55556666eeff", "SecurityGroupIds": [ "sg-edcd9784" ], - "StorageCapacity": 300, + "StorageCapacity": 3200, + "StorageType": "HDD", "SubnetIds": [ "subnet-1234abcd" ], @@ -67,22 +68,26 @@ ], "WindowsConfiguration": { "ActiveDirectoryId": "d-1234abcd12", + "Aliases": [ + "accounting.corp.example.com" + ], "AutomaticBackupRetentionDays": 30, "DailyAutomaticBackupStartTime": "05:00", - "ThroughputCapacity": 8, + "ThroughputCapacity": 32, "WeeklyMaintenanceStartTime": "1:05:00" } }, "output": { "FileSystem": { "CreationTime": "1481841524.0", - "DNSName": "fs-0498eed5fe91001ec.fsx.com", - "FileSystemId": "fs-0498eed5fe91001ec", - "KmsKeyId": "arn:aws:kms:us-east-1:012345678912:key/0ff3ea8d-130e-4133-877f-93908b6fdbd6", + "DNSName": "fs-0123456789abcdef0.fsx.com", + "FileSystemId": "fs-0123456789abcdef0", + "KmsKeyId": "arn:aws:kms:us-east-1:012345678912:key/1111abcd-2222-3333-4444-55556666eeff", "Lifecycle": "CREATING", "OwnerId": "012345678912", - "ResourceARN": "arn:aws:fsx:us-east-1:012345678912:file-system/fs-0498eed5fe91001ec", - "StorageCapacity": 300, + "ResourceARN": "arn:aws:fsx:us-east-1:012345678912:file-system/fs-0123456789abcdef0", + "StorageCapacity": 3200, + "StorageType": "HDD", "SubnetIds": [ "subnet-1234abcd" ], @@ -95,16 +100,22 @@ "VpcId": "vpc-ab1234cd", "WindowsConfiguration": { "ActiveDirectoryId": "d-1234abcd12", + "Aliases": [ + { + "Lifecycle": "CREATING", + "Name": "accounting.corp.example.com" + } + ], "AutomaticBackupRetentionDays": 30, "DailyAutomaticBackupStartTime": "05:00", - "ThroughputCapacity": 8, + "ThroughputCapacity": 32, "WeeklyMaintenanceStartTime": "1:05:00" } } }, "comments": { }, - "description": "This operation creates a new file system.", + "description": "This operation creates a new Amazon FSx for Windows File Server file system.", "id": "to-create-a-new-file-system-1481840798547", "title": "To create a new file system" } diff --git a/models/apis/fsx/2018-03-01/paginators-1.json b/models/apis/fsx/2018-03-01/paginators-1.json index 43df3a5f3a7..000eb57d5e8 100644 --- a/models/apis/fsx/2018-03-01/paginators-1.json +++ b/models/apis/fsx/2018-03-01/paginators-1.json @@ -10,6 +10,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "DescribeFileSystemAliases": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "DescribeFileSystems": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/iotanalytics/2017-11-27/api-2.json b/models/apis/iotanalytics/2017-11-27/api-2.json index 6ae405ebd7b..1f727d22ed3 100644 --- a/models/apis/iotanalytics/2017-11-27/api-2.json +++ b/models/apis/iotanalytics/2017-11-27/api-2.json @@ -676,7 +676,8 @@ "status":{"shape":"ChannelStatus"}, "retentionPeriod":{"shape":"RetentionPeriod"}, "creationTime":{"shape":"Timestamp"}, - "lastUpdateTime":{"shape":"Timestamp"} + "lastUpdateTime":{"shape":"Timestamp"}, + "lastMessageArrivalTime":{"shape":"Timestamp"} } }, "ChannelActivity":{ @@ 
-737,7 +738,8 @@ "channelStorage":{"shape":"ChannelStorageSummary"}, "status":{"shape":"ChannelStatus"}, "creationTime":{"shape":"Timestamp"}, - "lastUpdateTime":{"shape":"Timestamp"} + "lastUpdateTime":{"shape":"Timestamp"}, + "lastMessageArrivalTime":{"shape":"Timestamp"} } }, "ComputeType":{ @@ -787,7 +789,8 @@ "shape":"DatasetName", "location":"uri", "locationName":"datasetName" - } + }, + "versionId":{"shape":"DatasetContentVersion"} } }, "CreateDatasetContentResponse":{ @@ -809,7 +812,8 @@ "contentDeliveryRules":{"shape":"DatasetContentDeliveryRules"}, "retentionPeriod":{"shape":"RetentionPeriod"}, "versioningConfiguration":{"shape":"VersioningConfiguration"}, - "tags":{"shape":"TagList"} + "tags":{"shape":"TagList"}, + "lateDataRules":{"shape":"LateDataRules"} } }, "CreateDatasetResponse":{ @@ -909,7 +913,8 @@ "creationTime":{"shape":"Timestamp"}, "lastUpdateTime":{"shape":"Timestamp"}, "retentionPeriod":{"shape":"RetentionPeriod"}, - "versioningConfiguration":{"shape":"VersioningConfiguration"} + "versioningConfiguration":{"shape":"VersioningConfiguration"}, + "lateDataRules":{"shape":"LateDataRules"} } }, "DatasetAction":{ @@ -1077,7 +1082,8 @@ "status":{"shape":"DatastoreStatus"}, "retentionPeriod":{"shape":"RetentionPeriod"}, "creationTime":{"shape":"Timestamp"}, - "lastUpdateTime":{"shape":"Timestamp"} + "lastUpdateTime":{"shape":"Timestamp"}, + "lastMessageArrivalTime":{"shape":"Timestamp"} } }, "DatastoreActivity":{ @@ -1137,7 +1143,8 @@ "datastoreStorage":{"shape":"DatastoreStorageSummary"}, "status":{"shape":"DatastoreStatus"}, "creationTime":{"shape":"Timestamp"}, - "lastUpdateTime":{"shape":"Timestamp"} + "lastUpdateTime":{"shape":"Timestamp"}, + "lastMessageArrivalTime":{"shape":"Timestamp"} } }, "DeleteChannelRequest":{ @@ -1211,6 +1218,13 @@ "timeExpression":{"shape":"TimeExpression"} } }, + "DeltaTimeSessionWindowConfiguration":{ + "type":"structure", + "required":["timeoutInMinutes"], + "members":{ + "timeoutInMinutes":{"shape":"SessionTimeoutInMinutes"} + } + }, "DescribeChannelRequest":{ "type":"structure", "required":["channelName"], @@ -1469,6 +1483,32 @@ "min":1, "pattern":"^[a-zA-Z0-9_-]+$" }, + "LateDataRule":{ + "type":"structure", + "required":["ruleConfiguration"], + "members":{ + "ruleName":{"shape":"LateDataRuleName"}, + "ruleConfiguration":{"shape":"LateDataRuleConfiguration"} + } + }, + "LateDataRuleConfiguration":{ + "type":"structure", + "members":{ + "deltaTimeSessionWindowConfiguration":{"shape":"DeltaTimeSessionWindowConfiguration"} + } + }, + "LateDataRuleName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9_]+$" + }, + "LateDataRules":{ + "type":"list", + "member":{"shape":"LateDataRule"}, + "max":1, + "min":1 + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -1988,6 +2028,11 @@ "exception":true, "fault":true }, + "SessionTimeoutInMinutes":{ + "type":"integer", + "max":60, + "min":1 + }, "SizeInBytes":{"type":"double"}, "SqlQuery":{"type":"string"}, "SqlQueryDatasetAction":{ @@ -2148,7 +2193,8 @@ "triggers":{"shape":"DatasetTriggers"}, "contentDeliveryRules":{"shape":"DatasetContentDeliveryRules"}, "retentionPeriod":{"shape":"RetentionPeriod"}, - "versioningConfiguration":{"shape":"VersioningConfiguration"} + "versioningConfiguration":{"shape":"VersioningConfiguration"}, + "lateDataRules":{"shape":"LateDataRules"} } }, "UpdateDatastoreRequest":{ diff --git a/models/apis/iotanalytics/2017-11-27/docs-2.json b/models/apis/iotanalytics/2017-11-27/docs-2.json index be613d9f379..85984827399 100644 --- 
a/models/apis/iotanalytics/2017-11-27/docs-2.json +++ b/models/apis/iotanalytics/2017-11-27/docs-2.json @@ -5,32 +5,32 @@ "BatchPutMessage": "

Sends messages to a channel.

", "CancelPipelineReprocessing": "

Cancels the reprocessing of data through the pipeline.

", "CreateChannel": "

Creates a channel. A channel collects data from an MQTT topic and archives the raw, unprocessed messages before publishing the data to a pipeline.

", - "CreateDataset": "

Creates a data set. A data set stores data retrieved from a data store by applying a \"queryAction\" (a SQL query) or a \"containerAction\" (executing a containerized application). This operation creates the skeleton of a data set. The data set can be populated manually by calling \"CreateDatasetContent\" or automatically according to a \"trigger\" you specify.

", - "CreateDatasetContent": "

Creates the content of a data set by applying a \"queryAction\" (a SQL query) or a \"containerAction\" (executing a containerized application).

", + "CreateDataset": "

Creates a dataset. A dataset stores data retrieved from a data store by applying a queryAction (a SQL query) or a containerAction (executing a containerized application). This operation creates the skeleton of a dataset. The dataset can be populated manually by calling CreateDatasetContent or automatically according to a trigger you specify.

", + "CreateDatasetContent": "

Creates the content of a data set by applying a queryAction (a SQL query) or a containerAction (executing a containerized application).

", "CreateDatastore": "

Creates a data store, which is a repository for messages.

", "CreatePipeline": "

Creates a pipeline. A pipeline consumes messages from a channel and allows you to process the messages before storing them in a data store. You must specify both a channel and a datastore activity and, optionally, as many as 23 additional activities in the pipelineActivities array.
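
A minimal Go sketch of that channel-plus-datastore requirement (channel, datastore, and activity names are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotanalytics"
)

func main() {
	client := iotanalytics.New(session.Must(session.NewSession()))

	// The smallest valid pipeline: a channel activity feeding a datastore activity.
	_, err := client.CreatePipeline(&iotanalytics.CreatePipelineInput{
		PipelineName: aws.String("mypipeline"),
		PipelineActivities: []*iotanalytics.PipelineActivity{
			{
				Channel: &iotanalytics.ChannelActivity{
					Name:        aws.String("readFromChannel"),
					ChannelName: aws.String("mychannel"),
					Next:        aws.String("writeToDatastore"),
				},
			},
			{
				Datastore: &iotanalytics.DatastoreActivity{
					Name:          aws.String("writeToDatastore"),
					DatastoreName: aws.String("mydatastore"),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```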

", "DeleteChannel": "

Deletes the specified channel.

", - "DeleteDataset": "

Deletes the specified data set.

You do not have to delete the content of the data set before you perform this operation.

", - "DeleteDatasetContent": "

Deletes the content of the specified data set.

", + "DeleteDataset": "

Deletes the specified dataset.

You do not have to delete the content of the dataset before you perform this operation.

", + "DeleteDatasetContent": "

Deletes the content of the specified dataset.

", "DeleteDatastore": "

Deletes the specified data store.

", "DeletePipeline": "

Deletes the specified pipeline.

", "DescribeChannel": "

Retrieves information about a channel.

", - "DescribeDataset": "

Retrieves information about a data set.

", + "DescribeDataset": "

Retrieves information about a dataset.

", "DescribeDatastore": "

Retrieves information about a data store.

", "DescribeLoggingOptions": "

Retrieves the current settings of the AWS IoT Analytics logging options.

", "DescribePipeline": "

Retrieves information about a pipeline.

", - "GetDatasetContent": "

Retrieves the contents of a data set as pre-signed URIs.

", + "GetDatasetContent": "

Retrieves the contents of a data set as presigned URIs.

", "ListChannels": "

Retrieves a list of channels.

", "ListDatasetContents": "

Lists information about data set contents that have been created.

", "ListDatasets": "

Retrieves information about data sets.

", "ListDatastores": "

Retrieves a list of data stores.

", "ListPipelines": "

Retrieves a list of pipelines.

", - "ListTagsForResource": "

Lists the tags (metadata) which you have assigned to the resource.

", - "PutLoggingOptions": "

Sets or updates the AWS IoT Analytics logging options.

Note that if you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy) it takes up to 5 minutes for that change to take effect.

", + "ListTagsForResource": "

Lists the tags (metadata) that you have assigned to the resource.

", + "PutLoggingOptions": "

Sets or updates the AWS IoT Analytics logging options.

If you update the value of any loggingOptions field, it takes up to one minute for the change to take effect. Also, if you change the policy attached to the role you specified in the roleArn field (for example, to correct an invalid policy), it takes up to five minutes for that change to take effect.

", "RunPipelineActivity": "

Simulates the results of running a pipeline activity on a message payload.

", "SampleChannelData": "

Retrieves a sample of messages from the specified channel ingested during the specified timeframe. Up to 10 messages can be retrieved.

", "StartPipelineReprocessing": "

Starts the reprocessing of raw message data through the pipeline.

", - "TagResource": "

Adds to or modifies the tags of the given resource. Tags are metadata which can be used to manage a resource.

", + "TagResource": "

Adds to or modifies the tags of the given resource. Tags are metadata that can be used to manage a resource.

", "UntagResource": "

Removes the given tags (metadata) from the resource.

", "UpdateChannel": "

Updates the settings of a channel.

", "UpdateDataset": "

Updates the settings of a data set.

", @@ -41,30 +41,30 @@ "ActivityBatchSize": { "base": null, "refs": { - "LambdaActivity$batchSize": "

The number of messages passed to the Lambda function for processing.

The AWS Lambda function must be able to process all of these messages within five minutes, which is the maximum timeout duration for Lambda functions.

" + "LambdaActivity$batchSize": "

The number of messages passed to the Lambda function for processing.

The Lambda function must be able to process all of these messages within five minutes, which is the maximum timeout duration for Lambda functions.

" } }, "ActivityName": { "base": null, "refs": { - "AddAttributesActivity$name": "

The name of the 'addAttributes' activity.

", + "AddAttributesActivity$name": "

The name of the addAttributes activity.

", "AddAttributesActivity$next": "

The next activity in the pipeline.

", - "ChannelActivity$name": "

The name of the 'channel' activity.

", + "ChannelActivity$name": "

The name of the channel activity.

", "ChannelActivity$next": "

The next activity in the pipeline.

", - "DatastoreActivity$name": "

The name of the 'datastore' activity.

", - "DeviceRegistryEnrichActivity$name": "

The name of the 'deviceRegistryEnrich' activity.

", + "DatastoreActivity$name": "

The name of the datastore activity.

", + "DeviceRegistryEnrichActivity$name": "

The name of the deviceRegistryEnrich activity.

", "DeviceRegistryEnrichActivity$next": "

The next activity in the pipeline.

", - "DeviceShadowEnrichActivity$name": "

The name of the 'deviceShadowEnrich' activity.

", + "DeviceShadowEnrichActivity$name": "

The name of the deviceShadowEnrich activity.

", "DeviceShadowEnrichActivity$next": "

The next activity in the pipeline.

", - "FilterActivity$name": "

The name of the 'filter' activity.

", + "FilterActivity$name": "

The name of the filter activity.

", "FilterActivity$next": "

The next activity in the pipeline.

", - "LambdaActivity$name": "

The name of the 'lambda' activity.

", + "LambdaActivity$name": "

The name of the lambda activity.

", "LambdaActivity$next": "

The next activity in the pipeline.

", - "MathActivity$name": "

The name of the 'math' activity.

", + "MathActivity$name": "

The name of the math activity.

", "MathActivity$next": "

The next activity in the pipeline.

", - "RemoveAttributesActivity$name": "

The name of the 'removeAttributes' activity.

", + "RemoveAttributesActivity$name": "

The name of the removeAttributes activity.

", "RemoveAttributesActivity$next": "

The next activity in the pipeline.

", - "SelectAttributesActivity$name": "

The name of the 'selectAttributes' activity.

", + "SelectAttributesActivity$name": "

The name of the selectAttributes activity.

", "SelectAttributesActivity$next": "

The next activity in the pipeline.

" } }, @@ -90,7 +90,7 @@ "AttributeNameMapping": { "base": null, "refs": { - "AddAttributesActivity$attributes": "

A list of 1-50 \"AttributeNameMapping\" objects that map an existing attribute to a new attribute.

The existing attributes remain in the message, so if you want to remove the originals, use \"RemoveAttributeActivity\".

" + "AddAttributesActivity$attributes": "

A list of 1-50 AttributeNameMapping objects that map an existing attribute to a new attribute.

The existing attributes remain in the message, so if you want to remove the originals, use RemoveAttributeActivity.

" } }, "AttributeNames": { @@ -125,17 +125,17 @@ "BucketKeyExpression": { "base": null, "refs": { - "S3DestinationConfiguration$key": "

The key of the data set contents object. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). To produce a unique key, you can use \"!{iotanalytics:scheduledTime}\" to insert the time of the scheduled SQL query run, or \"!{iotanalytics:versioned} to insert a unique hash identifying the data set, for example: \"/DataSet/!{iotanalytics:scheduledTime}/!{iotanalytics:versioned}.csv\".

" + "S3DestinationConfiguration$key": "

The key of the dataset contents object in an S3 bucket. Each object has a key that is a unique identifier. Each object has exactly one key.

You can create a unique key with the following options:

The following example creates a unique key for a CSV file: dataset/mydataset/!{iotanalytics:scheduleTime}/!{iotanalytics:versionId}.csv

If you don't use !{iotanalytics:versionId} to specify the key, you might get duplicate keys. For example, you might have two dataset contents with the same scheduleTime but different versionIds. This means that one dataset content overwrites the other.
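
A hedged Go sketch of a content delivery rule that uses this key template (bucket, role, and entry name are placeholders):

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iotanalytics"
)

// s3DeliveryRule builds a delivery rule that writes each dataset content
// version to a unique S3 key, using the template described above.
func s3DeliveryRule() *iotanalytics.DatasetContentDeliveryRule {
	return &iotanalytics.DatasetContentDeliveryRule{
		EntryName: aws.String("toS3"), // illustrative entry name
		Destination: &iotanalytics.DatasetContentDeliveryDestination{
			S3DestinationConfiguration: &iotanalytics.S3DestinationConfiguration{
				Bucket:  aws.String("my-dataset-bucket"), // illustrative bucket
				Key:     aws.String("dataset/mydataset/!{iotanalytics:scheduleTime}/!{iotanalytics:versionId}.csv"),
				RoleArn: aws.String("arn:aws:iam::123456789012:role/my-iotanalytics-role"),
			},
		},
	}
}
```

The rule can be supplied in the contentDeliveryRules of CreateDataset or UpdateDataset.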

" } }, "BucketName": { "base": null, "refs": { - "CustomerManagedChannelS3Storage$bucket": "

The name of the Amazon S3 bucket in which channel data is stored.

", - "CustomerManagedChannelS3StorageSummary$bucket": "

The name of the Amazon S3 bucket in which channel data is stored.

", - "CustomerManagedDatastoreS3Storage$bucket": "

The name of the Amazon S3 bucket in which data store data is stored.

", - "CustomerManagedDatastoreS3StorageSummary$bucket": "

The name of the Amazon S3 bucket in which data store data is stored.

", - "S3DestinationConfiguration$bucket": "

The name of the Amazon S3 bucket to which data set contents are delivered.

" + "CustomerManagedChannelS3Storage$bucket": "

The name of the S3 bucket in which channel data is stored.

", + "CustomerManagedChannelS3StorageSummary$bucket": "

The name of the S3 bucket in which channel data is stored.

", + "CustomerManagedDatastoreS3Storage$bucket": "

The name of the S3 bucket in which data store data is stored.

", + "CustomerManagedDatastoreS3StorageSummary$bucket": "

The name of the S3 bucket in which data store data is stored.

", + "S3DestinationConfiguration$bucket": "

The name of the S3 bucket to which dataset contents are delivered.

" } }, "CancelPipelineReprocessingRequest": { @@ -185,7 +185,7 @@ "ChannelStatistics": { "base": "

Statistics information about the channel.

", "refs": { - "DescribeChannelResponse$statistics": "

Statistics about the channel. Included if the 'includeStatistics' parameter is set to true in the request.

" + "DescribeChannelResponse$statistics": "

Statistics about the channel. Included if the includeStatistics parameter is set to true in the request.
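
A rough sketch of requesting those statistics from the Go SDK; the Statistics and Size field names are assumed from the existing generated types (the channel name is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotanalytics"
)

func main() {
	client := iotanalytics.New(session.Must(session.NewSession()))

	out, err := client.DescribeChannel(&iotanalytics.DescribeChannelInput{
		ChannelName:       aws.String("mychannel"),
		IncludeStatistics: aws.Bool(true), // Statistics is only populated when this is true
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.Statistics != nil && out.Statistics.Size != nil {
		fmt.Printf("estimated size: %.0f bytes\n",
			aws.Float64Value(out.Statistics.Size.EstimatedSizeInBytes))
	}
}
```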

" } }, "ChannelStatus": { @@ -196,11 +196,11 @@ } }, "ChannelStorage": { - "base": "

Where channel data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after creation of the channel.

", + "base": "

Where channel data is stored. You may choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. This cannot be changed after creation of the channel.

", "refs": { - "Channel$storage": "

Where channel data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after creation of the channel.

", - "CreateChannelRequest$channelStorage": "

Where channel data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after creation of the channel.

", - "UpdateChannelRequest$channelStorage": "

Where channel data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after creation of the channel.

" + "Channel$storage": "

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

", + "CreateChannelRequest$channelStorage": "

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

", + "UpdateChannelRequest$channelStorage": "

Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the channel is created.

" } }, "ChannelStorageSummary": { @@ -212,7 +212,7 @@ "ChannelSummaries": { "base": null, "refs": { - "ListChannelsResponse$channelSummaries": "

A list of \"ChannelSummary\" objects.

" + "ListChannelsResponse$channelSummaries": "

A list of ChannelSummary objects.

" } }, "ChannelSummary": { @@ -224,13 +224,13 @@ "ComputeType": { "base": null, "refs": { - "ResourceConfiguration$computeType": "

The type of the compute resource used to execute the \"containerAction\". Possible values are: ACU_1 (vCPU=4, memory=16GiB) or ACU_2 (vCPU=8, memory=32GiB).

" + "ResourceConfiguration$computeType": "

The type of the compute resource used to execute the containerAction. Possible values are: ACU_1 (vCPU=4, memory=16 GiB) or ACU_2 (vCPU=8, memory=32 GiB).

" } }, "ContainerDatasetAction": { - "base": "

Information needed to run the \"containerAction\" to produce data set contents.

", + "base": "

Information required to run the containerAction to produce dataset contents.

", "refs": { - "DatasetAction$containerAction": "

Information which allows the system to run a containerized application in order to create the data set contents. The application must be in a Docker container along with any needed support libraries.

" + "DatasetAction$containerAction": "

Information that allows the system to run a containerized application to create the dataset contents. The application must be in a Docker container along with any required support libraries.

" } }, "CreateChannelRequest": { @@ -284,9 +284,9 @@ } }, "CustomerManagedChannelS3Storage": { - "base": "

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the \"retentionPeriod\" parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the channel.

", + "base": "

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

", "refs": { - "ChannelStorage$customerManagedS3": "

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the \"retentionPeriod\" parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the channel.

" + "ChannelStorage$customerManagedS3": "

Use this to store channel data in an S3 bucket that you manage. If customer managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.
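
A minimal Go sketch of creating a channel with customer-managed S3 storage (bucket, prefix, and role are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotanalytics"
)

func main() {
	client := iotanalytics.New(session.Must(session.NewSession()))

	// With customer-managed storage, retentionPeriod is ignored and the storage
	// choice cannot be changed after the channel is created.
	_, err := client.CreateChannel(&iotanalytics.CreateChannelInput{
		ChannelName: aws.String("mychannel"),
		ChannelStorage: &iotanalytics.ChannelStorage{
			CustomerManagedS3: &iotanalytics.CustomerManagedChannelS3Storage{
				Bucket:    aws.String("my-channel-bucket"), // illustrative bucket
				KeyPrefix: aws.String("raw/"),              // optional prefix
				RoleArn:   aws.String("arn:aws:iam::123456789012:role/my-iotanalytics-role"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```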

" } }, "CustomerManagedChannelS3StorageSummary": { @@ -296,9 +296,9 @@ } }, "CustomerManagedDatastoreS3Storage": { - "base": "

Use this to store data store data in an S3 bucket that you manage. When customer managed storage is selected, the \"retentionPeriod\" parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

", + "base": "

Use this to store data store data in an S3 bucket that you manage. When customer-managed storage is selected, the retentionPeriod parameter is ignored. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

", "refs": { - "DatastoreStorage$customerManagedS3": "

Use this to store data store data in an S3 bucket that you manage. When customer managed storage is selected, the \"retentionPeriod\" parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" + "DatastoreStorage$customerManagedS3": "

Use this to store data store data in an S3 bucket that you manage. When customer managed storage is selected, the retentionPeriod parameter is ignored. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" } }, "CustomerManagedDatastoreS3StorageSummary": { @@ -314,7 +314,7 @@ } }, "DatasetAction": { - "base": "

A \"DatasetAction\" object that specifies how data set contents are automatically created.

", + "base": "

A DatasetAction object that specifies how data set contents are automatically created.

", "refs": { "DatasetActions$member": null } @@ -323,17 +323,17 @@ "base": null, "refs": { "DatasetAction$actionName": "

The name of the data set action by which data set contents are automatically created.

", - "DatasetActionSummary$actionName": "

The name of the action which automatically creates the data set's contents.

" + "DatasetActionSummary$actionName": "

The name of the action that automatically creates the dataset's contents.

" } }, "DatasetActionSummaries": { "base": null, "refs": { - "DatasetSummary$actions": "

A list of \"DataActionSummary\" objects.

" + "DatasetSummary$actions": "

A list of DataActionSummary objects.

" } }, "DatasetActionSummary": { - "base": "

Information about the action which automatically creates the data set's contents.

", + "base": "

Information about the action that automatically creates the dataset's contents.

", "refs": { "DatasetActionSummaries$member": null } @@ -341,32 +341,32 @@ "DatasetActionType": { "base": null, "refs": { - "DatasetActionSummary$actionType": "

The type of action by which the data set's contents are automatically created.

" + "DatasetActionSummary$actionType": "

The type of action by which the dataset's contents are automatically created.

" } }, "DatasetActions": { "base": null, "refs": { "CreateDatasetRequest$actions": "

A list of actions that create the data set contents.

", - "Dataset$actions": "

The \"DatasetAction\" objects that automatically create the data set contents.

", - "UpdateDatasetRequest$actions": "

A list of \"DatasetAction\" objects.

" + "Dataset$actions": "

The DatasetAction objects that automatically create the data set contents.

", + "UpdateDatasetRequest$actions": "

A list of DatasetAction objects.

" } }, "DatasetArn": { "base": null, "refs": { - "CreateDatasetResponse$datasetArn": "

The ARN of the data set.

", + "CreateDatasetResponse$datasetArn": "

The ARN of the dataset.

", "Dataset$arn": "

The ARN of the data set.

" } }, "DatasetContentDeliveryDestination": { - "base": "

The destination to which data set contents are delivered.

", + "base": "

The destination to which dataset contents are delivered.

", "refs": { - "DatasetContentDeliveryRule$destination": "

The destination to which data set contents are delivered.

" + "DatasetContentDeliveryRule$destination": "

The destination to which dataset contents are delivered.

" } }, "DatasetContentDeliveryRule": { - "base": "

When data set contents are created they are delivered to destination specified here.

", + "base": "

When dataset contents are created, they are delivered to destination specified here.

", "refs": { "DatasetContentDeliveryRules$member": null } @@ -374,15 +374,15 @@ "DatasetContentDeliveryRules": { "base": null, "refs": { - "CreateDatasetRequest$contentDeliveryRules": "

When data set contents are created they are delivered to destinations specified here.

", - "Dataset$contentDeliveryRules": "

When data set contents are created they are delivered to destinations specified here.

", - "UpdateDatasetRequest$contentDeliveryRules": "

When data set contents are created they are delivered to destinations specified here.

" + "CreateDatasetRequest$contentDeliveryRules": "

When dataset contents are created, they are delivered to destinations specified here.

", + "Dataset$contentDeliveryRules": "

When dataset contents are created they are delivered to destinations specified here.

", + "UpdateDatasetRequest$contentDeliveryRules": "

When dataset contents are created, they are delivered to destinations specified here.

" } }, "DatasetContentState": { "base": null, "refs": { - "DatasetContentStatus$state": "

The state of the data set contents. Can be one of \"READY\", \"CREATING\", \"SUCCEEDED\" or \"FAILED\".

" + "DatasetContentStatus$state": "

The state of the data set contents. Can be one of READY, CREATING, SUCCEEDED, or FAILED.

" } }, "DatasetContentStatus": { @@ -399,7 +399,7 @@ } }, "DatasetContentSummary": { - "base": "

Summary information about data set contents.

", + "base": "

Summary information about dataset contents.

", "refs": { "DatasetContentSummaries$member": null } @@ -407,22 +407,23 @@ "DatasetContentVersion": { "base": null, "refs": { - "CreateDatasetContentResponse$versionId": "

The version ID of the data set contents which are being created.

", - "DatasetContentSummary$version": "

The version of the data set contents.

", - "DeleteDatasetContentRequest$versionId": "

The version of the data set whose content is deleted. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to delete the latest or latest successfully completed data set. If not specified, \"$LATEST_SUCCEEDED\" is the default.

", + "CreateDatasetContentRequest$versionId": "

The version ID of the dataset content. To specify versionId for a dataset content, the dataset must use a DeltaTime filter.
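
A hedged Go sketch of supplying the new versionId member through the SDK; the generated field name is assumed to be VersionId, and the dataset name and version ID are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotanalytics"
)

func main() {
	client := iotanalytics.New(session.Must(session.NewSession()))

	out, err := client.CreateDatasetContent(&iotanalytics.CreateDatasetContentInput{
		DatasetName: aws.String("mydataset"),
		// The dataset must use a delta time filter for VersionId to be accepted.
		VersionId: aws.String("00000000-0000-0000-0000-000000000000"), // illustrative version ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created content version:", aws.StringValue(out.VersionId))
}
```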

", + "CreateDatasetContentResponse$versionId": "

The version ID of the dataset contents that are being created.

", + "DatasetContentSummary$version": "

The version of the dataset contents.

", + "DeleteDatasetContentRequest$versionId": "

The version of the dataset whose content is deleted. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to delete the latest or latest successfully completed data set. If not specified, \"$LATEST_SUCCEEDED\" is the default.

", "GetDatasetContentRequest$versionId": "

The version of the data set whose contents are retrieved. You can also use the strings \"$LATEST\" or \"$LATEST_SUCCEEDED\" to retrieve the contents of the latest or latest successfully completed data set. If not specified, \"$LATEST_SUCCEEDED\" is the default.

" } }, "DatasetContentVersionValue": { - "base": "

The data set whose latest contents are used as input to the notebook or application.

", + "base": "

The dataset whose latest contents are used as input to the notebook or application.

", "refs": { - "Variable$datasetContentVersionValue": "

The value of the variable as a structure that specifies a data set content version.

" + "Variable$datasetContentVersionValue": "

The value of the variable as a structure that specifies a dataset content version.

" } }, "DatasetEntries": { "base": null, "refs": { - "GetDatasetContentResponse$entries": "

A list of \"DatasetEntry\" objects.

" + "GetDatasetContentResponse$entries": "

A list of DatasetEntry objects.

" } }, "DatasetEntry": { @@ -434,18 +435,18 @@ "DatasetName": { "base": null, "refs": { - "CreateDatasetContentRequest$datasetName": "

The name of the data set.

", + "CreateDatasetContentRequest$datasetName": "

The name of the dataset.

", "CreateDatasetRequest$datasetName": "

The name of the data set.

", - "CreateDatasetResponse$datasetName": "

The name of the data set.

", + "CreateDatasetResponse$datasetName": "

The name of the dataset.

", "Dataset$name": "

The name of the data set.

", - "DatasetContentVersionValue$datasetName": "

The name of the data set whose latest contents are used as input to the notebook or application.

", + "DatasetContentVersionValue$datasetName": "

The name of the dataset whose latest contents are used as input to the notebook or application.

", "DatasetSummary$datasetName": "

The name of the data set.

", - "DeleteDatasetContentRequest$datasetName": "

The name of the data set whose content is deleted.

", + "DeleteDatasetContentRequest$datasetName": "

The name of the dataset whose content is deleted.

", "DeleteDatasetRequest$datasetName": "

The name of the data set to delete.

", "DescribeDatasetRequest$datasetName": "

The name of the data set whose information is retrieved.

", "GetDatasetContentRequest$datasetName": "

The name of the data set whose contents are retrieved.

", "ListDatasetContentsRequest$datasetName": "

The name of the data set whose contents information you want to list.

", - "TriggeringDataset$name": "

The name of the data set whose content generation triggers the new data set content generation.

", + "TriggeringDataset$name": "

The name of the dataset whose content generation triggers the new dataset content generation.

", "UpdateDatasetRequest$datasetName": "

The name of the data set to update.

" } }, @@ -459,7 +460,7 @@ "DatasetSummaries": { "base": null, "refs": { - "ListDatasetsResponse$datasetSummaries": "

A list of \"DatasetSummary\" objects.

" + "ListDatasetsResponse$datasetSummaries": "

A list of DatasetSummary objects.

" } }, "DatasetSummary": { @@ -469,7 +470,7 @@ } }, "DatasetTrigger": { - "base": "

The \"DatasetTrigger\" that specifies when the data set is automatically updated.

", + "base": "

The DatasetTrigger that specifies when the data set is automatically updated.

", "refs": { "DatasetTriggers$member": null } @@ -477,10 +478,10 @@ "DatasetTriggers": { "base": null, "refs": { - "CreateDatasetRequest$triggers": "

A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.

", - "Dataset$triggers": "

The \"DatasetTrigger\" objects that specify when the data set is automatically updated.

", - "DatasetSummary$triggers": "

A list of triggers. A trigger causes data set content to be populated at a specified time interval or when another data set is populated. The list of triggers can be empty or contain up to five DataSetTrigger objects

", - "UpdateDatasetRequest$triggers": "

A list of \"DatasetTrigger\" objects. The list can be empty or can contain up to five DataSetTrigger objects.

" + "CreateDatasetRequest$triggers": "

A list of triggers. A trigger causes data set contents to be populated at a specified time interval or when another data set's contents are created. The list of triggers can be empty or contain up to five DataSetTrigger objects.

", + "Dataset$triggers": "

The DatasetTrigger objects that specify when the data set is automatically updated.

", + "DatasetSummary$triggers": "

A list of triggers. A trigger causes data set content to be populated at a specified time interval or when another data set is populated. The list of triggers can be empty or contain up to five DatasetTrigger objects.

", + "UpdateDatasetRequest$triggers": "

A list of DatasetTrigger objects. The list can be empty or can contain up to five DatasetTrigger objects.

" } }, "Datastore": { @@ -490,7 +491,7 @@ } }, "DatastoreActivity": { - "base": "

The 'datastore' activity that specifies where to store the processed data.

", + "base": "

The datastore activity that specifies where to store the processed data.

", "refs": { "PipelineActivity$datastore": "

Specifies where to store the processed message data.

" } @@ -518,7 +519,7 @@ "DatastoreStatistics": { "base": "

Statistical information about the data store.

", "refs": { - "DescribeDatastoreResponse$statistics": "

Additional statistical information about the data store. Included if the 'includeStatistics' parameter is set to true in the request.

" + "DescribeDatastoreResponse$statistics": "

Additional statistical information about the data store. Included if the includeStatistics parameter is set to true in the request.

" } }, "DatastoreStatus": { @@ -529,11 +530,11 @@ } }, "DatastoreStorage": { - "base": "

Where data store data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after the data store is created.

", + "base": "

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

", "refs": { - "CreateDatastoreRequest$datastoreStorage": "

Where data store data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after the data store is created.

", - "Datastore$storage": "

Where data store data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after the data store is created.

", - "UpdateDatastoreRequest$datastoreStorage": "

Where data store data is stored. You may choose one of \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified, the default is \"serviceManagedS3\". This cannot be changed after the data store is created.

" + "CreateDatastoreRequest$datastoreStorage": "

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

", + "Datastore$storage": "

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

", + "UpdateDatastoreRequest$datastoreStorage": "

Where data store data is stored. You can choose one of serviceManagedS3 or customerManagedS3 storage. If not specified, the default is serviceManagedS3. You cannot change this storage option after the data store is created.

" } }, "DatastoreStorageSummary": { @@ -545,7 +546,7 @@ "DatastoreSummaries": { "base": null, "refs": { - "ListDatastoresResponse$datastoreSummaries": "

A list of \"DatastoreSummary\" objects.

" + "ListDatastoresResponse$datastoreSummaries": "

A list of DatastoreSummary objects.

" } }, "DatastoreSummary": { @@ -585,6 +586,12 @@ "QueryFilter$deltaTime": "

Used to limit data to that which has arrived since the last execution of the action.

" } }, + "DeltaTimeSessionWindowConfiguration": { + "base": "

A structure that contains the configuration information of a delta time session window.

DeltaTime specifies a time interval. You can use DeltaTime to create dataset contents with data that has arrived in the data store since the last execution. For an example of DeltaTime, see Creating a SQL dataset with a delta window (CLI) in the AWS IoT Analytics User Guide.

", + "refs": { + "LateDataRuleConfiguration$deltaTimeSessionWindowConfiguration": "

The information needed to configure a delta time session window.

" + } + }, "DescribeChannelRequest": { "base": null, "refs": { @@ -642,9 +649,9 @@ } }, "DeviceShadowEnrichActivity": { - "base": "

An activity that adds information from the AWS IoT Device Shadows service to a message.

", + "base": "

An activity that adds information from the AWS IoT Device Shadow service to a message.

", "refs": { - "PipelineActivity$deviceShadowEnrich": "

Adds information from the AWS IoT Device Shadows service to a message.

" + "PipelineActivity$deviceShadowEnrich": "

Adds information from the AWS IoT Device Shadow service to a message.

" } }, "DoubleValue": { @@ -663,7 +670,7 @@ "EntryName": { "base": null, "refs": { - "DatasetContentDeliveryRule$entryName": "

The name of the data set content delivery rules entry.

", + "DatasetContentDeliveryRule$entryName": "

The name of the dataset content delivery rules entry.

", "DatasetEntry$entryName": "

The name of the data set item.

" } }, @@ -695,7 +702,7 @@ "FilterExpression": { "base": null, "refs": { - "FilterActivity$filter": "

An expression that looks like a SQL WHERE clause that must return a Boolean value.

" + "FilterActivity$filter": "

An expression that looks like a SQL WHERE clause that must return a Boolean value. Messages that satisfy the condition are passed to the next activity.

" } }, "GetDatasetContentRequest": { @@ -709,27 +716,27 @@ } }, "GlueConfiguration": { - "base": "

Configuration information for coordination with the AWS Glue ETL (extract, transform and load) service.

", + "base": "

Configuration information for coordination with AWS Glue, a fully managed extract, transform and load (ETL) service.

", "refs": { - "S3DestinationConfiguration$glueConfiguration": "

Configuration information for coordination with the AWS Glue ETL (extract, transform and load) service.

" + "S3DestinationConfiguration$glueConfiguration": "

Configuration information for coordination with AWS Glue, a fully managed extract, transform and load (ETL) service.

" } }, "GlueDatabaseName": { "base": null, "refs": { - "GlueConfiguration$databaseName": "

The name of the database in your AWS Glue Data Catalog in which the table is located. (An AWS Glue Data Catalog database contains Glue Data tables.)

" + "GlueConfiguration$databaseName": "

The name of the database in your AWS Glue Data Catalog in which the table is located. An AWS Glue Data Catalog database contains metadata tables.

" } }, "GlueTableName": { "base": null, "refs": { - "GlueConfiguration$tableName": "

The name of the table in your AWS Glue Data Catalog which is used to perform the ETL (extract, transform and load) operations. (An AWS Glue Data Catalog table contains partitioned data and descriptions of data sources and targets.)

" + "GlueConfiguration$tableName": "

The name of the table in your AWS Glue Data Catalog that is used to perform the ETL operations. An AWS Glue Data Catalog table contains partitioned data and descriptions of data sources and targets.

" } }, "Image": { "base": null, "refs": { - "ContainerDatasetAction$image": "

The ARN of the Docker container stored in your account. The Docker container contains an application and needed support libraries and is used to generate data set contents.

" + "ContainerDatasetAction$image": "

The ARN of the Docker container stored in your account. The Docker container contains an application and required support libraries and is used to generate dataset contents.

" } }, "IncludeStatisticsFlag": { @@ -750,15 +757,15 @@ } }, "IotEventsDestinationConfiguration": { - "base": "

Configuration information for delivery of data set contents to AWS IoT Events.

", + "base": "

Configuration information for delivery of dataset contents to AWS IoT Events.

", "refs": { - "DatasetContentDeliveryDestination$iotEventsDestinationConfiguration": "

Configuration information for delivery of data set contents to AWS IoT Events.

" + "DatasetContentDeliveryDestination$iotEventsDestinationConfiguration": "

Configuration information for delivery of dataset contents to AWS IoT Events.

" } }, "IotEventsInputName": { "base": null, "refs": { - "IotEventsDestinationConfiguration$inputName": "

The name of the AWS IoT Events input to which data set contents are delivered.

" + "IotEventsDestinationConfiguration$inputName": "

The name of the AWS IoT Events input to which dataset contents are delivered.

" } }, "LambdaActivity": { @@ -773,6 +780,32 @@ "LambdaActivity$lambdaName": "

The name of the Lambda function that is run on the message.

" } }, + "LateDataRule": { + "base": "

A structure that contains the name and configuration information of a late data rule.

", + "refs": { + "LateDataRules$member": null + } + }, + "LateDataRuleConfiguration": { + "base": "

The information needed to configure a delta time session window.

", + "refs": { + "LateDataRule$ruleConfiguration": "

The information needed to configure the late data rule.

" + } + }, + "LateDataRuleName": { + "base": null, + "refs": { + "LateDataRule$ruleName": "

The name of the late data rule.

" + } + }, + "LateDataRules": { + "base": null, + "refs": { + "CreateDatasetRequest$lateDataRules": "

A list of data rules that send notifications to Amazon CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.

", + "Dataset$lateDataRules": "

A list of data rules that send notifications to Amazon CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.

", + "UpdateDatasetRequest$lateDataRules": "

A list of data rules that send notifications to Amazon CloudWatch when data arrives late. To specify lateDataRules, the dataset must use a DeltaTime filter.

" + } + }, "LimitExceededException": { "base": "

The command caused an internal limit to be exceeded.

", "refs": { @@ -853,7 +886,7 @@ "LoggingLevel": { "base": null, "refs": { - "LoggingOptions$level": "

The logging level. Currently, only \"ERROR\" is supported.

" + "LoggingOptions$level": "

The logging level. Currently, only ERROR is supported.

" } }, "LoggingOptions": { @@ -878,7 +911,7 @@ "MaxMessages": { "base": null, "refs": { - "SampleChannelDataRequest$maxMessages": "

The number of sample messages to be retrieved. The limit is 10, the default is also 10.

" + "SampleChannelDataRequest$maxMessages": "

The number of sample messages to be retrieved. The limit is 10. The default is also 10.

" } }, "MaxResults": { @@ -894,7 +927,7 @@ "MaxVersions": { "base": null, "refs": { - "VersioningConfiguration$maxVersions": "

How many versions of data set contents will be kept. The \"unlimited\" parameter must be false.

" + "VersioningConfiguration$maxVersions": "

How many versions of dataset contents are kept. The unlimited parameter must be false.

" } }, "Message": { @@ -906,14 +939,14 @@ "MessageId": { "base": null, "refs": { - "BatchPutMessageErrorEntry$messageId": "

The ID of the message that caused the error. (See the value corresponding to the \"messageId\" key in the message object.)

", - "Message$messageId": "

The ID you wish to assign to the message. Each \"messageId\" must be unique within each batch sent.

" + "BatchPutMessageErrorEntry$messageId": "

The ID of the message that caused the error. See the value corresponding to the messageId key in the message object.

", + "Message$messageId": "

The ID you want to assign to the message. Each messageId must be unique within each batch sent.

" } }, "MessagePayload": { "base": null, "refs": { - "Message$payload": "

The payload of the message. This may be a JSON string or a Base64-encoded string representing binary data (in which case you must decode it by means of a pipeline activity).

", + "Message$payload": "

The payload of the message. This can be a JSON string or a base64-encoded string representing binary data, in which case you must decode it by means of a pipeline activity.

", "MessagePayloads$member": null } }, @@ -928,7 +961,7 @@ "Messages": { "base": null, "refs": { - "BatchPutMessageRequest$messages": "

The list of messages to be sent. Each message has format: '{ \"messageId\": \"string\", \"payload\": \"string\"}'.

Note that the field names of message payloads (data) that you send to AWS IoT Analytics:

For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.

" + "BatchPutMessageRequest$messages": "

The list of messages to be sent. Each message has the format: { \"messageId\": \"string\", \"payload\": \"string\"}.

The field names of message payloads (data) that you send to AWS IoT Analytics:

For example, {\"temp_01\": 29} or {\"_temp_01\": 29} are valid, but {\"temp-01\": 29}, {\"01_temp\": 29} or {\"__temp_01\": 29} are invalid in message payloads.

" } }, "NextToken": { @@ -949,13 +982,13 @@ "OffsetSeconds": { "base": null, "refs": { - "DeltaTime$offsetSeconds": "

The number of seconds of estimated \"in flight\" lag time of message data. When you create data set contents using message data from a specified time frame, some message data may still be \"in flight\" when processing begins, and so will not arrive in time to be processed. Use this field to make allowances for the \"in flight\" time of your message data, so that data not processed from a previous time frame will be included with the next time frame. Without this, missed message data would be excluded from processing during the next time frame as well, because its timestamp places it within the previous time frame.

" + "DeltaTime$offsetSeconds": "

The number of seconds of estimated in-flight lag time of message data. When you create dataset contents using message data from a specified timeframe, some message data might still be in flight when processing begins, and so might not arrive in time to be processed. Use this field to make allowances for the in-flight time of your message data, so that data not processed from a previous timeframe is included with the next timeframe. Otherwise, missed message data would be excluded from processing during the next timeframe too, because its timestamp places it within the previous timeframe.

" } }, "OutputFileName": { "base": null, "refs": { - "OutputFileUriValue$fileName": "

The URI of the location where data set contents are stored, usually the URI of a file in an S3 bucket.

" + "OutputFileUriValue$fileName": "

The URI of the location where dataset contents are stored, usually the URI of a file in an S3 bucket.

" } }, "OutputFileUriValue": { @@ -967,22 +1000,22 @@ "Pipeline": { "base": "

Contains information about a pipeline.

", "refs": { - "DescribePipelineResponse$pipeline": "

A \"Pipeline\" object that contains information about the pipeline.

" + "DescribePipelineResponse$pipeline": "

A Pipeline object that contains information about the pipeline.

" } }, "PipelineActivities": { "base": null, "refs": { - "CreatePipelineRequest$pipelineActivities": "

A list of \"PipelineActivity\" objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity, for example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

", + "CreatePipelineRequest$pipelineActivities": "

A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

", "Pipeline$activities": "

The activities that perform transformations on the messages.

", - "UpdatePipelineRequest$pipelineActivities": "

A list of \"PipelineActivity\" objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity, for example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

" + "UpdatePipelineRequest$pipelineActivities": "

A list of PipelineActivity objects. Activities perform transformations on your messages, such as removing, renaming or adding message attributes; filtering messages based on attribute values; invoking your Lambda functions on messages for advanced processing; or performing mathematical transformations to normalize device data.

The list can be 2-25 PipelineActivity objects and must contain both a channel and a datastore activity. Each entry in the list must contain only one activity. For example:

pipelineActivities = [ { \"channel\": { ... } }, { \"lambda\": { ... } }, ... ]

" } }, "PipelineActivity": { "base": "

An activity that performs a transformation on a message.

", "refs": { "PipelineActivities$member": null, - "RunPipelineActivityRequest$pipelineActivity": "

The pipeline activity that is run. This must not be a 'channel' activity or a 'datastore' activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. If a 'lambda' activity is specified, only short-running Lambda functions (those with a timeout of less than 30 seconds or less) can be used.

" + "RunPipelineActivityRequest$pipelineActivity": "

The pipeline activity that is run. This must not be a channel activity or a datastore activity because these activities are used in a pipeline only to load the original message and to store the (possibly) transformed message. If a lambda activity is specified, only short-running Lambda functions (those with a timeout of 30 seconds or less) can be used.

" } }, "PipelineArn": { @@ -1009,7 +1042,7 @@ "PipelineSummaries": { "base": null, "refs": { - "ListPipelinesResponse$pipelineSummaries": "

A list of \"PipelineSummary\" objects.

" + "ListPipelinesResponse$pipelineSummaries": "

A list of PipelineSummary objects.

" } }, "PipelineSummary": { @@ -1021,7 +1054,7 @@ "PresignedURI": { "base": null, "refs": { - "DatasetEntry$dataURI": "

The pre-signed URI of the data set item.

" + "DatasetEntry$dataURI": "

The presigned URI of the data set item.

" } }, "PutLoggingOptionsRequest": { @@ -1030,7 +1063,7 @@ } }, "QueryFilter": { - "base": "

Information which is used to filter message data, to segregate it according to the time frame in which it arrives.

", + "base": "

Information that is used to filter message data, to segregate it according to the timeframe in which it arrives.

", "refs": { "QueryFilters$member": null } @@ -1038,7 +1071,7 @@ "QueryFilters": { "base": null, "refs": { - "SqlQueryDatasetAction$filters": "

Pre-filters applied to message data.

" + "SqlQueryDatasetAction$filters": "

Prefilters applied to message data.

" } }, "Reason": { @@ -1056,8 +1089,8 @@ "ReprocessingId": { "base": null, "refs": { - "CancelPipelineReprocessingRequest$reprocessingId": "

The ID of the reprocessing task (returned by \"StartPipelineReprocessing\").

", - "ReprocessingSummary$id": "

The 'reprocessingId' returned by \"StartPipelineReprocessing\".

", + "CancelPipelineReprocessingRequest$reprocessingId": "

The ID of the reprocessing task (returned by StartPipelineReprocessing).

", + "ReprocessingSummary$id": "

The reprocessingId returned by StartPipelineReprocessing.

", "StartPipelineReprocessingResponse$reprocessingId": "

The ID of the pipeline reprocessing activity that was started.

" } }, @@ -1094,9 +1127,9 @@ } }, "ResourceConfiguration": { - "base": "

The configuration of the resource used to execute the \"containerAction\".

", + "base": "

The configuration of the resource used to execute the containerAction.

", "refs": { - "ContainerDatasetAction$resourceConfiguration": "

Configuration of the resource which executes the \"containerAction\".

" + "ContainerDatasetAction$resourceConfiguration": "

Configuration of the resource that executes the containerAction.

" } }, "ResourceNotFoundException": { @@ -1108,38 +1141,38 @@ "base": "

How long, in days, message data is kept.

", "refs": { "Channel$retentionPeriod": "

How long, in days, message data is kept for the channel.

", - "CreateChannelRequest$retentionPeriod": "

How long, in days, message data is kept for the channel. When \"customerManagedS3\" storage is selected, this parameter is ignored.

", + "CreateChannelRequest$retentionPeriod": "

How long, in days, message data is kept for the channel. When customerManagedS3 storage is selected, this parameter is ignored.

", "CreateChannelResponse$retentionPeriod": "

How long, in days, message data is kept for the channel.

", - "CreateDatasetRequest$retentionPeriod": "

[Optional] How long, in days, versions of data set contents are kept for the data set. If not specified or set to null, versions of data set contents are retained for at most 90 days. The number of versions of data set contents retained is determined by the versioningConfiguration parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)

", - "CreateDatasetResponse$retentionPeriod": "

How long, in days, data set contents are kept for the data set.

", - "CreateDatastoreRequest$retentionPeriod": "

How long, in days, message data is kept for the data store. When \"customerManagedS3\" storage is selected, this parameter is ignored.

", + "CreateDatasetRequest$retentionPeriod": "

Optional. How long, in days, versions of dataset contents are kept for the dataset. If not specified or set to null, versions of dataset contents are retained for at most 90 days. The number of versions of dataset contents retained is determined by the versioningConfiguration parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

", + "CreateDatasetResponse$retentionPeriod": "

How long, in days, dataset contents are kept for the dataset.

", + "CreateDatastoreRequest$retentionPeriod": "

How long, in days, message data is kept for the data store. When customerManagedS3 storage is selected, this parameter is ignored.

", "CreateDatastoreResponse$retentionPeriod": "

How long, in days, message data is kept for the data store.

", - "Dataset$retentionPeriod": "

[Optional] How long, in days, message data is kept for the data set.

", - "Datastore$retentionPeriod": "

How long, in days, message data is kept for the data store. When \"customerManagedS3\" storage is selected, this parameter is ignored.

", + "Dataset$retentionPeriod": "

Optional. How long, in days, message data is kept for the data set.

", + "Datastore$retentionPeriod": "

How long, in days, message data is kept for the data store. When customerManagedS3 storage is selected, this parameter is ignored.

", "UpdateChannelRequest$retentionPeriod": "

How long, in days, message data is kept for the channel. The retention period cannot be updated if the channel's S3 storage is customer-managed.

", - "UpdateDatasetRequest$retentionPeriod": "

How long, in days, data set contents are kept for the data set.

", + "UpdateDatasetRequest$retentionPeriod": "

How long, in days, dataset contents are kept for the dataset.

", "UpdateDatastoreRequest$retentionPeriod": "

How long, in days, message data is kept for the data store. The retention period cannot be updated if the data store's S3 storage is customer-managed.

" } }, "RetentionPeriodInDays": { "base": null, "refs": { - "RetentionPeriod$numberOfDays": "

The number of days that message data is kept. The \"unlimited\" parameter must be false.

" + "RetentionPeriod$numberOfDays": "

The number of days that message data is kept. The unlimited parameter must be false.

" } }, "RoleArn": { "base": null, "refs": { - "ContainerDatasetAction$executionRoleArn": "

The ARN of the role which gives permission to the system to access needed resources in order to run the \"containerAction\". This includes, at minimum, permission to retrieve the data set contents which are the input to the containerized application.

", - "CustomerManagedChannelS3Storage$roleArn": "

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

", - "CustomerManagedChannelS3StorageSummary$roleArn": "

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

", - "CustomerManagedDatastoreS3Storage$roleArn": "

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

", - "CustomerManagedDatastoreS3StorageSummary$roleArn": "

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

", + "ContainerDatasetAction$executionRoleArn": "

The ARN of the role that gives the system permission to access the resources required to run the containerAction. This includes, at minimum, permission to retrieve the dataset contents that are the input to the containerized application.

", + "CustomerManagedChannelS3Storage$roleArn": "

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

", + "CustomerManagedChannelS3StorageSummary$roleArn": "

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

", + "CustomerManagedDatastoreS3Storage$roleArn": "

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

", + "CustomerManagedDatastoreS3StorageSummary$roleArn": "

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 resources.

", "DeviceRegistryEnrichActivity$roleArn": "

The ARN of the role that allows access to the device's registry information.

", "DeviceShadowEnrichActivity$roleArn": "

The ARN of the role that allows access to the device's shadow.

", - "IotEventsDestinationConfiguration$roleArn": "

The ARN of the role which grants AWS IoT Analytics permission to deliver data set contents to an AWS IoT Events input.

", + "IotEventsDestinationConfiguration$roleArn": "

The ARN of the role that grants AWS IoT Analytics permission to deliver dataset contents to an AWS IoT Events input.

", "LoggingOptions$roleArn": "

The ARN of the role that grants permission to AWS IoT Analytics to perform logging.

", - "S3DestinationConfiguration$roleArn": "

The ARN of the role which grants AWS IoT Analytics permission to interact with your Amazon S3 and AWS Glue resources.

" + "S3DestinationConfiguration$roleArn": "

The ARN of the role that grants AWS IoT Analytics permission to interact with your Amazon S3 and AWS Glue resources.

" } }, "RunPipelineActivityRequest": { @@ -1153,18 +1186,18 @@ } }, "S3DestinationConfiguration": { - "base": "

Configuration information for delivery of data set contents to Amazon S3.

", + "base": "

Configuration information for delivery of dataset contents to Amazon Simple Storage Service (Amazon S3).

", "refs": { - "DatasetContentDeliveryDestination$s3DestinationConfiguration": "

Configuration information for delivery of data set contents to Amazon S3.

" + "DatasetContentDeliveryDestination$s3DestinationConfiguration": "

Configuration information for delivery of dataset contents to Amazon S3.

" } }, "S3KeyPrefix": { "base": null, "refs": { - "CustomerManagedChannelS3Storage$keyPrefix": "

[Optional] The prefix used to create the keys of the channel data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a '/'.

", - "CustomerManagedChannelS3StorageSummary$keyPrefix": "

[Optional] The prefix used to create the keys of the channel data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a '/'.

", - "CustomerManagedDatastoreS3Storage$keyPrefix": "

[Optional] The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a '/'.

", - "CustomerManagedDatastoreS3StorageSummary$keyPrefix": "

[Optional] The prefix used to create the keys of the data store data objects. Each object in an Amazon S3 bucket has a key that is its unique identifier within the bucket (each object in a bucket has exactly one key). The prefix must end with a '/'.

" + "CustomerManagedChannelS3Storage$keyPrefix": "

Optional. The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

", + "CustomerManagedChannelS3StorageSummary$keyPrefix": "

Optional. The prefix used to create the keys of the channel data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

", + "CustomerManagedDatastoreS3Storage$keyPrefix": "

Optional. The prefix used to create the keys of the data store data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

", + "CustomerManagedDatastoreS3StorageSummary$keyPrefix": "

Optional. The prefix used to create the keys of the data store data objects. Each object in an S3 bucket has a key that is its unique identifier in the bucket. Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).

" } }, "SampleChannelDataRequest": { @@ -1180,13 +1213,13 @@ "Schedule": { "base": "

The schedule for when to trigger an update.

", "refs": { - "DatasetTrigger$schedule": "

The \"Schedule\" when the trigger is initiated.

" + "DatasetTrigger$schedule": "

The Schedule when the trigger is initiated.

" } }, "ScheduleExpression": { "base": null, "refs": { - "Schedule$expression": "

The expression that defines when to trigger an update. For more information, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide.

" + "Schedule$expression": "

The expression that defines when to trigger an update. For more information, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide.

" } }, "SelectAttributesActivity": { @@ -1196,27 +1229,27 @@ } }, "ServiceManagedChannelS3Storage": { - "base": "

Use this to store channel data in an S3 bucket managed by the AWS IoT Analytics service. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the channel.

", + "base": "

Use this to store channel data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

", "refs": { - "ChannelStorage$serviceManagedS3": "

Use this to store channel data in an S3 bucket managed by the AWS IoT Analytics service. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the channel.

" + "ChannelStorage$serviceManagedS3": "

Use this to store channel data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the channel is created.

" } }, "ServiceManagedChannelS3StorageSummary": { - "base": "

Used to store channel data in an S3 bucket managed by the AWS IoT Analytics service.

", + "base": "

Used to store channel data in an S3 bucket managed by AWS IoT Analytics.

", "refs": { - "ChannelStorageSummary$serviceManagedS3": "

Used to store channel data in an S3 bucket managed by the AWS IoT Analytics service.

" + "ChannelStorageSummary$serviceManagedS3": "

Used to store channel data in an S3 bucket managed by AWS IoT Analytics.

" } }, "ServiceManagedDatastoreS3Storage": { - "base": "

Use this to store data store data in an S3 bucket managed by the AWS IoT Analytics service. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

", + "base": "

Use this to store data store data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

", "refs": { - "DatastoreStorage$serviceManagedS3": "

Use this to store data store data in an S3 bucket managed by the AWS IoT Analytics service. The choice of service-managed or customer-managed S3 storage cannot be changed after creation of the data store.

" + "DatastoreStorage$serviceManagedS3": "

Use this to store data store data in an S3 bucket managed by AWS IoT Analytics. You cannot change the choice of service-managed or customer-managed S3 storage after the data store is created.

" } }, "ServiceManagedDatastoreS3StorageSummary": { - "base": "

Used to store data store data in an S3 bucket managed by the AWS IoT Analytics service.

", + "base": "

Used to store data store data in an S3 bucket managed by AWS IoT Analytics.

", "refs": { - "DatastoreStorageSummary$serviceManagedS3": "

Used to store data store data in an S3 bucket managed by the AWS IoT Analytics service.

" + "DatastoreStorageSummary$serviceManagedS3": "

Used to store data store data in an S3 bucket managed by AWS IoT Analytics.

" } }, "ServiceUnavailableException": { @@ -1224,10 +1257,16 @@ "refs": { } }, + "SessionTimeoutInMinutes": { + "base": null, + "refs": { + "DeltaTimeSessionWindowConfiguration$timeoutInMinutes": "

A time interval. You can use timeoutInMinutes so that AWS IoT Analytics can batch up late data notifications that have been generated since the last execution. AWS IoT Analytics sends one batch of notifications to Amazon CloudWatch Events at one time.

For more information about how to write a timestamp expression, see Date and Time Functions and Operators in the Presto 0.172 Documentation.

" + } + }, "SizeInBytes": { "base": null, "refs": { - "EstimatedResourceSize$estimatedSizeInBytes": "

The estimated size of the resource in bytes.

" + "EstimatedResourceSize$estimatedSizeInBytes": "

The estimated size of the resource, in bytes.

" } }, "SqlQuery": { @@ -1239,7 +1278,7 @@ "SqlQueryDatasetAction": { "base": "

The SQL query to modify the message.

", "refs": { - "DatasetAction$queryAction": "

An \"SqlQueryDatasetAction\" object that uses an SQL query to automatically create data set contents.

" + "DatasetAction$queryAction": "

An SqlQueryDatasetAction object that uses an SQL query to automatically create data set contents.

" } }, "StartPipelineReprocessingRequest": { @@ -1266,7 +1305,7 @@ } }, "Tag": { - "base": "

A set of key/value pairs which are used to manage the resource.

", + "base": "

A set of key-value pairs that are used to manage the resource.

", "refs": { "TagList$member": null } @@ -1291,7 +1330,7 @@ "CreateDatasetRequest$tags": "

Metadata which can be used to manage the data set.

", "CreateDatastoreRequest$tags": "

Metadata which can be used to manage the data store.

", "CreatePipelineRequest$tags": "

Metadata which can be used to manage the pipeline.

", - "ListTagsForResourceResponse$tags": "

The tags (metadata) which you have assigned to the resource.

", + "ListTagsForResourceResponse$tags": "

The tags (metadata) that you have assigned to the resource.

", "TagResourceRequest$tags": "

The new or modified tags for the resource.

" } }, @@ -1319,7 +1358,7 @@ "TimeExpression": { "base": null, "refs": { - "DeltaTime$timeExpression": "

An expression by which the time of the message data may be determined. This may be the name of a timestamp field, or a SQL expression which is used to derive the time the message data was generated.

" + "DeltaTime$timeExpression": "

An expression by which the time of the message data might be determined. This can be the name of a timestamp field or a SQL expression that is used to derive the time the message data was generated.

" } }, "Timestamp": { @@ -1327,23 +1366,27 @@ "refs": { "Channel$creationTime": "

When the channel was created.

", "Channel$lastUpdateTime": "

When the channel was last updated.

", + "Channel$lastMessageArrivalTime": "

The last time when a new message arrived in the channel.

AWS IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

", "ChannelSummary$creationTime": "

When the channel was created.

", "ChannelSummary$lastUpdateTime": "

The last time the channel was updated.

", + "ChannelSummary$lastMessageArrivalTime": "

The last time when a new message arrived in the channel.

AWS IoT Analytics updates this value at most once per minute for one channel. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

", "Dataset$creationTime": "

When the data set was created.

", "Dataset$lastUpdateTime": "

The last time the data set was updated.

", - "DatasetContentSummary$creationTime": "

The actual time the creation of the data set contents was started.

", - "DatasetContentSummary$scheduleTime": "

The time the creation of the data set contents was scheduled to start.

", + "DatasetContentSummary$creationTime": "

The actual time the creation of the dataset contents was started.

", + "DatasetContentSummary$scheduleTime": "

The time the creation of the dataset contents was scheduled to start.

", "DatasetContentSummary$completionTime": "

The time the dataset content status was updated to SUCCEEDED or FAILED.

", "DatasetSummary$creationTime": "

The time the data set was created.

", "DatasetSummary$lastUpdateTime": "

The last time the data set was updated.

", "Datastore$creationTime": "

When the data store was created.

", "Datastore$lastUpdateTime": "

The last time the data store was updated.

", + "Datastore$lastMessageArrivalTime": "

The last time when a new message arrived in the data store.

AWS IoT Analytics updates this value at most once per minute for one data store. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

", "DatastoreSummary$creationTime": "

When the data store was created.

", "DatastoreSummary$lastUpdateTime": "

The last time the data store was updated.

", + "DatastoreSummary$lastMessageArrivalTime": "

The last time when a new message arrived in the data store.

AWS IoT Analytics updates this value at most once per minute for one data store. Hence, the lastMessageArrivalTime value is an approximation.

This feature only applies to messages that arrived in the data store after October 23, 2020.

", "EstimatedResourceSize$estimatedOn": "

The time when the estimate of the size of the resource was made.

", "GetDatasetContentResponse$timestamp": "

The time when the request was made.

", - "ListDatasetContentsRequest$scheduledOnOrAfter": "

A filter to limit results to those data set contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", - "ListDatasetContentsRequest$scheduledBefore": "

A filter to limit results to those data set contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", + "ListDatasetContentsRequest$scheduledOnOrAfter": "

A filter to limit results to those data set contents whose creation is scheduled on or after the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", + "ListDatasetContentsRequest$scheduledBefore": "

A filter to limit results to those data set contents whose creation is scheduled before the given time. See the field triggers.schedule in the CreateDataset request. (timestamp)

", "Pipeline$creationTime": "

When the pipeline was created.

", "Pipeline$lastUpdateTime": "

The last time the pipeline was updated.

", "PipelineSummary$creationTime": "

When the pipeline was created.

", @@ -1352,7 +1395,7 @@ } }, "TriggeringDataset": { - "base": "

Information about the data set whose content generation triggers the new data set content generation.

", + "base": "

Information about the dataset whose content generation triggers the new dataset content generation.

", "refs": { "DatasetTrigger$dataset": "

The data set whose content creation triggers the creation of this data set's contents.

" } @@ -1366,7 +1409,7 @@ "UnlimitedVersioning": { "base": null, "refs": { - "VersioningConfiguration$unlimited": "

If true, unlimited versions of data set contents will be kept.

" + "VersioningConfiguration$unlimited": "

If true, unlimited versions of dataset contents are kept.

" } }, "UntagResourceRequest": { @@ -1400,7 +1443,7 @@ } }, "Variable": { - "base": "

An instance of a variable to be passed to the \"containerAction\" execution. Each variable must have a name and a value given by one of \"stringValue\", \"datasetContentVersionValue\", or \"outputFileUriValue\".

", + "base": "

An instance of a variable to be passed to the containerAction execution. Each variable must have a name and a value given by one of stringValue, datasetContentVersionValue, or outputFileUriValue.

", "refs": { "Variables$member": null } @@ -1414,21 +1457,21 @@ "Variables": { "base": null, "refs": { - "ContainerDatasetAction$variables": "

The values of variables used within the context of the execution of the containerized application (basically, parameters passed to the application). Each variable must have a name and a value given by one of \"stringValue\", \"datasetContentVersionValue\", or \"outputFileUriValue\".

" + "ContainerDatasetAction$variables": "

The values of variables used in the context of the execution of the containerized application (basically, parameters passed to the application). Each variable must have a name and a value given by one of stringValue, datasetContentVersionValue, or outputFileUriValue.

" } }, "VersioningConfiguration": { - "base": "

Information about the versioning of data set contents.

", + "base": "

Information about the versioning of dataset contents.

", "refs": { - "CreateDatasetRequest$versioningConfiguration": "

[Optional] How many versions of data set contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the \"retentionPeriod\" parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)

", - "Dataset$versioningConfiguration": "

[Optional] How many versions of data set contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the \"retentionPeriod\" parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)

", - "UpdateDatasetRequest$versioningConfiguration": "

[Optional] How many versions of data set contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the \"retentionPeriod\" parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions)

" + "CreateDatasetRequest$versioningConfiguration": "

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

", + "Dataset$versioningConfiguration": "

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

", + "UpdateDatasetRequest$versioningConfiguration": "

Optional. How many versions of dataset contents are kept. If not specified or set to null, only the latest version plus the latest succeeded version (if they are different) are kept for the time period specified by the retentionPeriod parameter. For more information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets in the AWS IoT Analytics User Guide.

" } }, "VolumeSizeInGB": { "base": null, "refs": { - "ResourceConfiguration$volumeSizeInGB": "

The size (in GB) of the persistent storage available to the resource instance used to execute the \"containerAction\" (min: 1, max: 50).

" + "ResourceConfiguration$volumeSizeInGB": "

The size, in GB, of the persistent storage available to the resource instance used to execute the containerAction (min: 1, max: 50).

" } }, "errorMessage": { diff --git a/models/apis/macie2/2020-01-01/api-2.json b/models/apis/macie2/2020-01-01/api-2.json index c2cca817862..6be573ccf99 100644 --- a/models/apis/macie2/2020-01-01/api-2.json +++ b/models/apis/macie2/2020-01-01/api-2.json @@ -4584,6 +4584,10 @@ "Record": { "type": "structure", "members": { + "jsonPath": { + "shape": "__string", + "locationName": "jsonPath" + }, "recordIndex": { "shape": "__long", "locationName": "recordIndex" diff --git a/models/apis/macie2/2020-01-01/docs-2.json b/models/apis/macie2/2020-01-01/docs-2.json index 30186ff112e..1f7e0511c3f 100644 --- a/models/apis/macie2/2020-01-01/docs-2.json +++ b/models/apis/macie2/2020-01-01/docs-2.json @@ -214,7 +214,7 @@ "Cells" : { "base" : "

Specifies the location of occurrences of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file.

", "refs" : { - "Occurrences$Cells" : "

An array of objects, one for each occurrence of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file. Each object specifies the cell that contains the data. This value is null for all other types of files.

" + "Occurrences$Cells" : "

An array of objects, one for each occurrence of sensitive data in a Microsoft Excel workbook, CSV file, or TSV file. Each object specifies the cell or field that contains the data. This value is null for all other types of files.

" } }, "ClassificationDetails" : { @@ -641,7 +641,7 @@ "base" : "

The operator to use in a condition. Valid values are:

", "refs" : { "ListJobsFilterTerm$Comparator" : "

The operator to use to filter the results.

", - "SimpleScopeTerm$Comparator" : "

The operator to use in the condition. Valid operators for each supported property (key) are:

", + "SimpleScopeTerm$Comparator" : "

The operator to use in the condition. Valid operators for each supported property (key) are:

", "TagScopeTerm$Comparator" : "

The operator to use in the condition. Valid operators are EQ (equals) or NE (not equals).

" } }, @@ -668,9 +668,9 @@ "JobStatus" : { "base" : "

The status of a classification job. Possible values are:

", "refs" : { - "DescribeClassificationJobResponse$JobStatus" : "

The current status of the job. Possible values are:

", - "JobSummary$JobStatus" : "

The current status of the job. Possible values are:

", - "UpdateClassificationJobRequest$JobStatus" : "

The new status for the job. Valid values are:

" + "DescribeClassificationJobResponse$JobStatus" : "

The current status of the job. Possible values are:

", + "JobSummary$JobStatus" : "

The current status of the job. Possible values are:

", + "UpdateClassificationJobRequest$JobStatus" : "

The new status for the job. Valid values are:

" } }, "JobSummary" : { @@ -855,7 +855,7 @@ "refs" : { } }, "Range" : { - "base" : "

Provides details about the location of an occurrence of sensitive data in an Adobe Portable Document Format file, Apache Avro object container, Microsoft Word document, or non-binary text file.

", + "base" : "

Provides details about the location of an occurrence of sensitive data in an Adobe Portable Document Format file, Microsoft Word document, or non-binary text file.

", "refs" : { "Page$LineRange" : "

The line that contains the data, and the position of the data on that line.

", "Page$OffsetRange" : "

The position of the data on the page, relative to the beginning of the page.

", @@ -863,14 +863,14 @@ } }, "Ranges" : { - "base" : "

Provides details about the location of occurrences of sensitive data in an Adobe Portable Document Format file, Apache Avro object container, Microsoft Word document, or non-binary text file.

", + "base" : "

Provides details about the location of occurrences of sensitive data in an Adobe Portable Document Format file, Microsoft Word document, or non-binary text file.

", "refs" : { - "Occurrences$LineRanges" : "

An array of objects, one for each occurrence of sensitive data in an Apache Avro object container, Microsoft Word document, or non-binary text file, such as an HTML, JSON, TXT, or XML file. Each object specifies the line that contains the data, and the position of the data on that line.

This value is often null for file types that are supported by Cell, Page, or Record objects. Exceptions are the locations of: full names and addresses in a Microsoft Excel workbook, CSV file, or TSV file; data in unstructured sections of an otherwise structured file, such as a comment in a file; and, data in a malformed file that Amazon Macie analyzes as plain text.

", + "Occurrences$LineRanges" : "

An array of objects, one for each occurrence of sensitive data in a Microsoft Word document or non-binary text file, such as an HTML, JSON, TXT, or XML file. Each object specifies the line that contains the data, and the position of the data on that line.

This value is often null for file types that are supported by Cell, Page, or Record objects. Exceptions are the locations of data in unstructured sections of an otherwise structured file, such as a comment in a file, and data in a malformed file that Amazon Macie analyzes as plain text.

", "Occurrences$OffsetRanges" : "

An array of objects, one for each occurrence of sensitive data in a binary text file. Each object specifies the position of the data relative to the beginning of the file.

This value is typically null. For binary text files, Macie adds location data to a lineRanges.Range or Page object, depending on the file type.

" } }, "Record" : { - "base" : "

Specifies the location of an occurrence of sensitive data in an Apache Parquet file.

", + "base" : "

Specifies the location of an occurrence of sensitive data in an Apache Avro object container or Apache Parquet file.

", "refs" : { "Records$member" : null } @@ -878,7 +878,7 @@ "Records" : { "base" : "

Specifies the location of occurrences of sensitive data in an Apache Parquet file.

", "refs" : { - "Occurrences$Records" : "

An array of objects, one for each occurrence of sensitive data in an Apache Parquet file. Each object specifies the row that contains the data. This value is null for all other types of files.

" + "Occurrences$Records" : "

An array of objects, one for each occurrence of sensitive data in an Apache Avro object container or Apache Parquet file. Each object specifies the field or record that contains the data. This value is null for all other types of files.

" } }, "RelationshipStatus" : { @@ -1023,7 +1023,7 @@ "SharedAccess" : { "base" : null, "refs" : { - "BucketMetadata$SharedAccess" : "

Specifies whether the bucket is shared with another AWS account. Possible values are:

" + "BucketMetadata$SharedAccess" : "

Specifies whether the bucket is shared with another AWS account. Possible values are:

" } }, "SimpleScopeTerm" : { @@ -1220,10 +1220,10 @@ } }, "UserPausedDetails" : { - "base" : "

Provides information about when a classification job was paused and when it will expire and be cancelled if it isn’t resumed. This object is present only if a job’s current status (jobStatus) is USER_PAUSED.

", + "base" : "

Provides information about when a classification job was paused and when it will expire and be cancelled if it isn't resumed. This object is present only if a job's current status (jobStatus) is USER_PAUSED.

", "refs" : { - "DescribeClassificationJobResponse$UserPausedDetails" : "

If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job will expire and be cancelled if it isn’t resumed. This value is present only if the value for jobStatus is USER_PAUSED.

", - "JobSummary$UserPausedDetails" : "

If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job will expire and be cancelled if it isn’t resumed. This value is present only if the value for jobStatus is USER_PAUSED.

" + "DescribeClassificationJobResponse$UserPausedDetails" : "

If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job will expire and be cancelled if it isn't resumed. This value is present only if the value for jobStatus is USER_PAUSED.

", + "JobSummary$UserPausedDetails" : "

If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job will expire and be cancelled if it isn't resumed. This value is present only if the value for jobStatus is USER_PAUSED.

" } }, "ValidationException" : { @@ -1438,7 +1438,7 @@ "CreateCustomDataIdentifierRequest$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 4 - 90 characters. Keywords aren't case sensitive.

", "CreateInvitationsRequest$AccountIds" : "

An array that lists AWS account IDs, one for each account to send the invitation to.

", "CriterionAdditionalProperties$Eq" : "

An equal to condition to apply to a specified property value for findings.

", - "CriterionAdditionalProperties$EqExactMatch" : "

A condition that requires an array field on a finding to exactly match the specified property values. You can use this operator with the following properties:

", + "CriterionAdditionalProperties$EqExactMatch" : "

A condition that requires an array field of a finding to exactly match the specified property values. You can use this operator with the following properties: customDataIdentifiers.detections.arn, customDataIdentifiers.detections.name, resourcesAffected.s3Bucket.tags.key, resourcesAffected.s3Bucket.tags.value, resourcesAffected.s3Object.tags.key, resourcesAffected.s3Object.tags.value, sensitiveData.category, and sensitiveData.detections.type.

", "CriterionAdditionalProperties$Neq" : "

A not equal to condition to apply to a specified property value for findings.

", "DeclineInvitationsRequest$AccountIds" : "

An array that lists AWS account IDs, one for each account that sent an invitation to decline.

", "DeleteInvitationsRequest$AccountIds" : "

An array that lists AWS account IDs, one for each account that sent an invitation to delete.

", @@ -1450,7 +1450,7 @@ "ListJobsFilterTerm$Values" : "

An array that lists one or more values to use to filter the results.

", "ReplicationDetails$ReplicationAccounts" : "

An array of AWS account IDs, one for each AWS account that the bucket is configured to replicate one or more objects to.

", "S3BucketDefinitionForJob$Buckets" : "

An array that lists the names of the buckets.

", - "SimpleScopeTerm$Values" : "

An array that lists the values to use in the condition. If the value for the key property is OBJECT_EXTENSION, this array can specify multiple values and Amazon Macie uses an OR operator to join the values. Otherwise, this array can specify only one value. Valid values for each supported property (key) are:

", + "SimpleScopeTerm$Values" : "

An array that lists the values to use in the condition. If the value for the key property is OBJECT_EXTENSION, this array can specify multiple values and Amazon Macie uses an OR operator to join the values. Otherwise, this array can specify only one value. Valid values for each supported property (key) are:

", "TestCustomDataIdentifierRequest$IgnoreWords" : "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4 - 90 characters. Ignore words are case sensitive.

", "TestCustomDataIdentifierRequest$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 4 - 90 characters. Keywords aren't case sensitive.

", "UsageStatisticsFilter$Values" : "

An array that lists values to use in the condition, based on the value for the field specified by the key property. If the value for the key property is accountId, this array can specify multiple values. Otherwise, this array can specify only one value.

Valid values for each supported field are:

" @@ -1502,14 +1502,14 @@ "ObjectCountByEncryptionType$KmsManaged" : "

The total number of objects that are encrypted using an AWS Key Management Service (AWS KMS) customer master key (CMK). The objects use AWS KMS AWS-managed (AWS-KMS) encryption or AWS KMS customer-managed (SSE-KMS) encryption.

", "ObjectCountByEncryptionType$S3Managed" : "

The total number of objects that are encrypted using an Amazon S3-managed key. The objects use Amazon S3-managed (SSE-S3) encryption.

", "ObjectCountByEncryptionType$Unencrypted" : "

The total number of objects that aren't encrypted or use client-side encryption.

", - "ObjectLevelStatistics$FileType" : "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects don’t have a file name extension for a supported file or storage format.

", + "ObjectLevelStatistics$FileType" : "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects don't have a file name extension for a supported file or storage format.

", "ObjectLevelStatistics$StorageClass" : "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported storage class.

", - "ObjectLevelStatistics$Total" : "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported storage class or don’t have a file name extension for a supported file or storage format.

", + "ObjectLevelStatistics$Total" : "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported storage class or don't have a file name extension for a supported file or storage format.

", "Page$PageNumber" : "

The page number of the page that contains the data.

", "Range$End" : "

Possible values are:

", "Range$Start" : "

Possible values are:

", "Range$StartColumn" : "

The column number for the column that contains the data, if the file contains structured data.

", - "Record$RecordIndex" : "

The row index, starting from 0, for the row that contains the data.

", + "Record$RecordIndex" : "

The record index, starting from 0, for the record that contains the data.

", "S3Object$Size" : "

The total storage size, in bytes, of the object.

", "SensitiveDataItem$TotalCount" : "

The total number of occurrences of the sensitive data that was detected.

", "ServiceLimit$Value" : "

The value for the metric specified by the UsageByAccount.type field in the response.

", @@ -1653,6 +1653,7 @@ "Member$Arn" : "

The Amazon Resource Name (ARN) of the account.

", "Member$Email" : "

The email address for the account.

", "Member$MasterAccountId" : "

The AWS account ID for the master account.

", + "Record$JsonPath" : "

The path, as a JSONPath expression, to the field (in an Apache Avro object container) or record (in an Apache Parquet file) that contains the data.

If the name of an element exceeds 20 characters, Amazon Macie truncates the name by removing characters from the beginning of the name. If the resulting full path exceeds 250 characters, Macie also truncates the path, starting with the first element in the path, until the path contains 250 or fewer characters.

", "ResourceNotFoundException$Message" : "

The explanation of the error that occurred.

", "S3Bucket$Arn" : "

The Amazon Resource Name (ARN) of the bucket.

", "S3Bucket$Name" : "

The name of the bucket.

", @@ -1695,7 +1696,7 @@ "UserIdentityRoot$AccountId" : "

The unique identifier for the AWS account.

", "UserIdentityRoot$Arn" : "

The Amazon Resource Name (ARN) of the principal that performed the action. The last section of the ARN contains the name of the user or role that performed the action.

", "UserIdentityRoot$PrincipalId" : "

The unique identifier for the entity that performed the action.

", - "UserPausedDetails$JobImminentExpirationHealthEventArn" : "

The Amazon Resource Name (ARN) of the AWS Health event that Amazon Macie sent to notify you of the job’s pending expiration and cancellation. This value is null if a job has been paused for less than 23 days.

", + "UserPausedDetails$JobImminentExpirationHealthEventArn" : "

The Amazon Resource Name (ARN) of the AWS Health event that Amazon Macie sent to notify you of the job's pending expiration and cancellation. This value is null if a job has been paused for less than 23 days.

", "ValidationException$Message" : "

The explanation of the error that occurred.

", "__listOf__string$member" : null } @@ -1727,7 +1728,7 @@ "S3Object$LastModified" : "

The date and time, in UTC and extended ISO 8601 format, when the object was last modified.

", "SessionContextAttributes$CreationDate" : "

The date and time, in UTC and ISO 8601 format, when the credentials were issued.

", "UsageRecord$FreeTrialStartDate" : "

The date and time, in UTC and extended ISO 8601 format, when the free trial started for the account.

", - "UserPausedDetails$JobExpiresAt" : "

The date and time, in UTC and extended ISO 8601 format, when the job will expire and be cancelled if you don't resume it first. If you don’t resume a job within 30 days of pausing it, the job expires and Amazon Macie cancels it.

", + "UserPausedDetails$JobExpiresAt" : "

The date and time, in UTC and extended ISO 8601 format, when the job will expire and be cancelled if you don't resume it first. If you don't resume a job within 30 days of pausing it, the job expires and Amazon Macie cancels it.

", "UserPausedDetails$JobPausedAt" : "

The date and time, in UTC and extended ISO 8601 format, when you paused the job.

" } } diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index 3568a0dadeb..eb6a26be3b0 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -115,6 +115,15 @@ }, "input":{"shape":"DeleteBucketEncryptionRequest"} }, + "DeleteBucketIntelligentTieringConfiguration":{ + "name":"DeleteBucketIntelligentTieringConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/{Bucket}?intelligent-tiering", + "responseCode":204 + }, + "input":{"shape":"DeleteBucketIntelligentTieringConfigurationRequest"} + }, "DeleteBucketInventoryConfiguration":{ "name":"DeleteBucketInventoryConfiguration", "http":{ @@ -280,6 +289,15 @@ "input":{"shape":"GetBucketEncryptionRequest"}, "output":{"shape":"GetBucketEncryptionOutput"} }, + "GetBucketIntelligentTieringConfiguration":{ + "name":"GetBucketIntelligentTieringConfiguration", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"GetBucketIntelligentTieringConfigurationRequest"}, + "output":{"shape":"GetBucketIntelligentTieringConfigurationOutput"} + }, "GetBucketInventoryConfiguration":{ "name":"GetBucketInventoryConfiguration", "http":{ @@ -444,7 +462,8 @@ "input":{"shape":"GetObjectRequest"}, "output":{"shape":"GetObjectOutput"}, "errors":[ - {"shape":"NoSuchKey"} + {"shape":"NoSuchKey"}, + {"shape":"InvalidObjectState"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html" }, @@ -550,6 +569,15 @@ "input":{"shape":"ListBucketAnalyticsConfigurationsRequest"}, "output":{"shape":"ListBucketAnalyticsConfigurationsOutput"} }, + "ListBucketIntelligentTieringConfigurations":{ + "name":"ListBucketIntelligentTieringConfigurations", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"ListBucketIntelligentTieringConfigurationsRequest"}, + "output":{"shape":"ListBucketIntelligentTieringConfigurationsOutput"} + }, "ListBucketInventoryConfigurations":{ "name":"ListBucketInventoryConfigurations", "http":{ @@ -680,6 +708,14 @@ "input":{"shape":"PutBucketEncryptionRequest"}, "httpChecksumRequired":true }, + "PutBucketIntelligentTieringConfiguration":{ + "name":"PutBucketIntelligentTieringConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}?intelligent-tiering" + }, + "input":{"shape":"PutBucketIntelligentTieringConfigurationRequest"} + }, "PutBucketInventoryConfiguration":{ "name":"PutBucketInventoryConfiguration", "http":{ @@ -1092,6 +1128,13 @@ "type":"string", "enum":["CSV"] }, + "ArchiveStatus":{ + "type":"string", + "enum":[ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS" + ] + }, "Body":{"type":"blob"}, "Bucket":{ "type":"structure", @@ -2058,6 +2101,25 @@ } } }, + "DeleteBucketIntelligentTieringConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"IntelligentTieringId", + "location":"querystring", + "locationName":"id" + } + } + }, "DeleteBucketInventoryConfigurationRequest":{ "type":"structure", "required":[ @@ -2721,6 +2783,32 @@ } } }, + "GetBucketIntelligentTieringConfigurationOutput":{ + "type":"structure", + "members":{ + "IntelligentTieringConfiguration":{"shape":"IntelligentTieringConfiguration"} + }, + "payload":"IntelligentTieringConfiguration" + }, + "GetBucketIntelligentTieringConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id" + ], + "members":{ + 
"Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"IntelligentTieringId", + "location":"querystring", + "locationName":"id" + } + } + }, "GetBucketInventoryConfigurationOutput":{ "type":"structure", "members":{ @@ -3696,6 +3784,11 @@ "location":"header", "locationName":"x-amz-restore" }, + "ArchiveStatus":{ + "shape":"ArchiveStatus", + "location":"header", + "locationName":"x-amz-archive-status" + }, "LastModified":{ "shape":"LastModified", "location":"header", @@ -3929,6 +4022,71 @@ "Parquet":{"shape":"ParquetInput"} } }, + "IntelligentTieringAccessTier":{ + "type":"string", + "enum":[ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS" + ] + }, + "IntelligentTieringAndOperator":{ + "type":"structure", + "members":{ + "Prefix":{"shape":"Prefix"}, + "Tags":{ + "shape":"TagSet", + "flattened":true, + "locationName":"Tag" + } + } + }, + "IntelligentTieringConfiguration":{ + "type":"structure", + "required":[ + "Id", + "Status", + "Tierings" + ], + "members":{ + "Id":{"shape":"IntelligentTieringId"}, + "Filter":{"shape":"IntelligentTieringFilter"}, + "Status":{"shape":"IntelligentTieringStatus"}, + "Tierings":{ + "shape":"TieringList", + "locationName":"Tiering" + } + } + }, + "IntelligentTieringConfigurationList":{ + "type":"list", + "member":{"shape":"IntelligentTieringConfiguration"}, + "flattened":true + }, + "IntelligentTieringDays":{"type":"integer"}, + "IntelligentTieringFilter":{ + "type":"structure", + "members":{ + "Prefix":{"shape":"Prefix"}, + "Tag":{"shape":"Tag"}, + "And":{"shape":"IntelligentTieringAndOperator"} + } + }, + "IntelligentTieringId":{"type":"string"}, + "IntelligentTieringStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "InvalidObjectState":{ + "type":"structure", + "members":{ + "StorageClass":{"shape":"StorageClass"}, + "AccessTier":{"shape":"IntelligentTieringAccessTier"} + }, + "exception":true + }, "InventoryConfiguration":{ "type":"structure", "required":[ @@ -4199,6 +4357,34 @@ } } }, + "ListBucketIntelligentTieringConfigurationsOutput":{ + "type":"structure", + "members":{ + "IsTruncated":{"shape":"IsTruncated"}, + "ContinuationToken":{"shape":"Token"}, + "NextContinuationToken":{"shape":"NextToken"}, + "IntelligentTieringConfigurationList":{ + "shape":"IntelligentTieringConfigurationList", + "locationName":"IntelligentTieringConfiguration" + } + } + }, + "ListBucketIntelligentTieringConfigurationsRequest":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "ContinuationToken":{ + "shape":"Token", + "location":"querystring", + "locationName":"continuation-token" + } + } + }, "ListBucketInventoryConfigurationsOutput":{ "type":"structure", "members":{ @@ -4679,10 +4865,7 @@ "MetadataValue":{"type":"string"}, "Metrics":{ "type":"structure", - "required":[ - "Status", - "EventThreshold" - ], + "required":["Status"], "members":{ "Status":{"shape":"MetricsStatus"}, "EventThreshold":{"shape":"ReplicationTimeValue"} @@ -5303,6 +5486,32 @@ }, "payload":"ServerSideEncryptionConfiguration" }, + "PutBucketIntelligentTieringConfigurationRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Id", + "IntelligentTieringConfiguration" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Id":{ + "shape":"IntelligentTieringId", + "location":"querystring", + "locationName":"id" + }, + "IntelligentTieringConfiguration":{ + 
"shape":"IntelligentTieringConfiguration", + "locationName":"IntelligentTieringConfiguration", + "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + } + }, + "payload":"IntelligentTieringConfiguration" + }, "PutBucketInventoryConfigurationRequest":{ "type":"structure", "required":[ @@ -6926,6 +7135,22 @@ "Expedited" ] }, + "Tiering":{ + "type":"structure", + "required":[ + "Days", + "AccessTier" + ], + "members":{ + "Days":{"shape":"IntelligentTieringDays"}, + "AccessTier":{"shape":"IntelligentTieringAccessTier"} + } + }, + "TieringList":{ + "type":"list", + "member":{"shape":"Tiering"}, + "flattened":true + }, "Token":{"type":"string"}, "TopicArn":{"type":"string"}, "TopicConfiguration":{ diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index a210aeea023..8cb57a83beb 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -11,6 +11,7 @@ "DeleteBucketAnalyticsConfiguration": "

Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to DeleteBucketAnalyticsConfiguration:

", "DeleteBucketCors": "

Deletes the cors configuration information set for the bucket.

To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources:

", "DeleteBucketEncryption": "

This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "DeleteBucketIntelligentTieringConfiguration": "

Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to DeleteBucketIntelligentTieringConfiguration include:

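As a rough illustration of how the operation described above surfaces in the generated Go client, here is a minimal sketch. It assumes credentials and a region are already configured in the environment; the bucket name and configuration ID are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Assumes credentials and region are configured in the environment.
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Delete the S3 Intelligent-Tiering configuration identified by its ID.
	_, err := svc.DeleteBucketIntelligentTieringConfiguration(&s3.DeleteBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),        // placeholder bucket name
		Id:     aws.String("ExampleConfiguration"),  // placeholder configuration ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("intelligent-tiering configuration deleted")
}
```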
", "DeleteBucketInventoryConfiguration": "

Deletes an inventory configuration (identified by the inventory ID) from the bucket.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

Operations related to DeleteBucketInventoryConfiguration include:

", "DeleteBucketLifecycle": "

Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.

To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration action. By default, the bucket owner has this permission and the bucket owner can grant this permission to others.

There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3 systems.

For more information about the object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

", "DeleteBucketMetricsConfiguration": "

Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to DeleteBucketMetricsConfiguration:

", @@ -28,6 +29,7 @@ "GetBucketAnalyticsConfiguration": "

This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.

Related Resources

", "GetBucketCors": "

Returns the cors configuration information set for the bucket.

To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

For more information about cors, see Enabling Cross-Origin Resource Sharing.

The following operations are related to GetBucketCors:

", "GetBucketEncryption": "

Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to GetBucketEncryption:

", + "GetBucketIntelligentTieringConfiguration": "

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to GetBucketIntelligentTieringConfiguration include:

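A minimal sketch of reading a configuration back through the generated Go client; the session setup is assumed and the identifiers are placeholders. The Status, AccessTier, and Days fields come from the IntelligentTieringConfiguration and Tiering shapes added in this model update.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession()) // assumes configured credentials/region
	svc := s3.New(sess)

	out, err := svc.GetBucketIntelligentTieringConfiguration(&s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),       // placeholder
		Id:     aws.String("ExampleConfiguration"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Print the configuration status and its archive tierings.
	cfg := out.IntelligentTieringConfiguration
	fmt.Println("status:", aws.StringValue(cfg.Status))
	for _, t := range cfg.Tierings {
		fmt.Printf("tier %s after %d days\n", aws.StringValue(t.AccessTier), aws.Int64Value(t.Days))
	}
}
```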
", "GetBucketInventoryConfiguration": "

Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

The following operations are related to GetBucketInventoryConfiguration:

", "GetBucketLifecycle": "

For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycle has the following special error:

The following operations are related to GetBucketLifecycle:

", "GetBucketLifecycleConfiguration": "

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier API description, see GetBucketLifecycle.

Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

GetBucketLifecycleConfiguration has the following special error:

The following operations are related to GetBucketLifecycleConfiguration:

", @@ -44,7 +46,7 @@ "GetBucketTagging": "

Returns the tag set associated with the bucket.

To use this operation, you must have permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

GetBucketTagging has the following special error:

The following operations are related to GetBucketTagging:

", "GetBucketVersioning": "

Returns the versioning state of a bucket.

To retrieve the versioning state of a bucket, you must be the bucket owner.

This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is enabled, the bucket owner must use an authentication device to change the versioning state of the bucket.

The following operations are related to GetBucketVersioning:

", "GetBucketWebsite": "

Returns the website configuration for a bucket. To host website on Amazon S3, you can configure a bucket as website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

This GET operation requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.

The following operations are related to DeleteBucketWebsite:

", - "GetObject": "

Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE storage classes, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

Versioning

By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.

If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.

Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

Additional Considerations about Request Headers

If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested.

If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.

The following operations are related to GetObject:

", + "GetObject": "

Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering Archive, or S3 Intelligent-Tiering Deep Archive storage classes, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

Versioning

By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.

If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.

Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

Additional Considerations about Request Headers

If both the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK and the requested data.

If both the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, then S3 returns a 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.

The following operations are related to GetObject:

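The restore requirement above is what the InvalidObjectState error (added to GetObject's error list in this model update) reports. Below is a minimal Go sketch of handling it; the bucket and key are placeholders and a configured session is assumed.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession()) // assumes configured credentials/region
	svc := s3.New(sess)

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("examplebucket"),                   // placeholder
		Key:    aws.String("photos/2006/February/sample.jpg"), // placeholder key
	})
	if err != nil {
		// Objects in an archived storage class or archive access tier must be
		// restored with RestoreObject before they can be retrieved.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "InvalidObjectState" {
			log.Fatalf("object is archived and must be restored first: %v", aerr)
		}
		log.Fatal(err)
	}
	defer out.Body.Close()

	// Stream the object body to stdout.
	if _, err := io.Copy(os.Stdout, out.Body); err != nil {
		log.Fatal(err)
	}
}
```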
", "GetObjectAcl": "

Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP access to the object.

This action is not supported by Amazon S3 on Outposts.

Versioning

By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.

The following operations are related to GetObjectAcl:

", "GetObjectLegalHold": "

Gets an object's current Legal Hold status. For more information, see Locking Objects.

This action is not supported by Amazon S3 on Outposts.

", "GetObjectLockConfiguration": "

Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.

", @@ -55,6 +57,7 @@ "HeadBucket": "

This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK if the bucket exists and you have permission to access it. Otherwise, the operation might return responses such as 404 Not Found and 403 Forbidden.

To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

", "HeadObject": "

The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

For more information about conditional requests, see RFC 7232.

Permissions

You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

The following operation is related to HeadObject:

", "ListBucketAnalyticsConfigurations": "

Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

The following operations are related to ListBucketAnalyticsConfigurations:

", + "ListBucketIntelligentTieringConfigurations": "

Lists the S3 Intelligent-Tiering configurations in the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to ListBucketIntelligentTieringConfigurations include:

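A minimal sketch of walking the listing with the generated Go client, following NextContinuationToken manually in the same continuation-token pattern described for the other list operations; a configured session is assumed and the bucket name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession()) // assumes configured credentials/region
	svc := s3.New(sess)

	var token *string
	for {
		out, err := svc.ListBucketIntelligentTieringConfigurations(&s3.ListBucketIntelligentTieringConfigurationsInput{
			Bucket:            aws.String("example-bucket"), // placeholder
			ContinuationToken: token,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, cfg := range out.IntelligentTieringConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id), aws.StringValue(cfg.Status))
		}
		// Follow NextContinuationToken until the listing is no longer truncated.
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		token = out.NextContinuationToken
	}
}
```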
", "ListBucketInventoryConfigurations": "

Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about the Amazon S3 inventory feature, see Amazon S3 Inventory

The following operations are related to ListBucketInventoryConfigurations:

", "ListBucketMetricsConfigurations": "

Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.

This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to ListBucketMetricsConfigurations:

", "ListBuckets": "

Returns a list of all buckets owned by the authenticated sender of the request.

", @@ -68,6 +71,7 @@ "PutBucketAnalyticsConfiguration": "

Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.

You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.

You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

Special Errors

Related Resources

", "PutBucketCors": "

Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

Related Resources

", "PutBucketEncryption": "

This implementation of the PUT operation uses the encryption subresource to set the default encryption state of an existing bucket.

This implementation of the PUT operation sets default encryption for a bucket using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Related Resources

", + "PutBucketIntelligentTieringConfiguration": "

Puts an S3 Intelligent-Tiering configuration on the specified bucket.

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers when access patterns change.

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

Operations related to PutBucketIntelligentTieringConfiguration include:

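A minimal sketch of writing a configuration with the generated Go client; a configured session is assumed, the bucket, ID, and prefix are placeholders, and the tier names come from the IntelligentTieringAccessTier enum and Tiering shape added in this model update (the day thresholds shown are illustrative).

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession()) // assumes configured credentials/region
	svc := s3.New(sess)

	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),       // placeholder
		Id:     aws.String("ExampleConfiguration"), // placeholder
		IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
			Id:     aws.String("ExampleConfiguration"),
			Status: aws.String("Enabled"),
			Filter: &s3.IntelligentTieringFilter{
				Prefix: aws.String("logs/"), // placeholder prefix
			},
			// Access tier values come from the enum added in this release.
			Tierings: []*s3.Tiering{
				{AccessTier: aws.String("ARCHIVE_ACCESS"), Days: aws.Int64(90)},
				{AccessTier: aws.String("DEEP_ARCHIVE_ACCESS"), Days: aws.Int64(180)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("intelligent-tiering configuration saved")
}
```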
", "PutBucketInventoryConfiguration": "

This implementation of the PUT operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.

You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Special Errors

Related Resources

", "PutBucketLifecycle": "

For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

Related Resources

", "PutBucketLifecycleConfiguration": "

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Managing Access Permissions to Your Amazon S3 Resources.

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

Rules

You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:

For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

Permissions

By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

The following are related to PutBucketLifecycleConfiguration:

", @@ -75,7 +79,7 @@ "PutBucketMetricsConfiguration": "

Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

The following operations are related to PutBucketMetricsConfiguration:

GetBucketLifecycle has the following special error:

", "PutBucketNotification": "

No longer used, see the PutBucketNotificationConfiguration operation.

", "PutBucketNotificationConfiguration": "

Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

<NotificationConfiguration>

</NotificationConfiguration>

This operation replaces the existing notification configuration with the configuration you include in the request body.

After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

You can disable notifications by adding the empty NotificationConfiguration element.

By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 will not add the configuration to your bucket.

Responses

If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

The following operation is related to PutBucketNotificationConfiguration:

", - "PutBucketOwnershipControls": "

Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

For information about Amazon S3 Object Ownership, see Using Object Ownership.

The following operations are related to GetBucketOwnershipControls:

", + "PutBucketOwnershipControls": "

Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

For information about Amazon S3 Object Ownership, see Using Object Ownership.

The following operations are related to PutBucketOwnershipControls:

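A minimal sketch of calling this operation from the generated Go client; a configured session is assumed, the bucket name is a placeholder, and the rule value shown (BucketOwnerPreferred) is an assumption about the supported object-ownership settings rather than something defined in this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession()) // assumes configured credentials/region
	svc := s3.New(sess)

	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		OwnershipControls: &s3.OwnershipControls{
			Rules: []*s3.OwnershipControlsRule{
				{ObjectOwnership: aws.String("BucketOwnerPreferred")}, // assumed setting value
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ownership controls applied")
}
```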
", "PutBucketPolicy": "

Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

The following operations are related to PutBucketPolicy:

", "PutBucketReplication": "

Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket.

To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

The latest version of the replication configuration XML is V2. XML V2 replication configurations are those that contain the Filter element for rules, and rules that specify S3 Replication Time Control (S3 RTC). In XML V2 replication configurations, Amazon S3 doesn't replicate delete markers. Therefore, you must set the DeleteMarkerReplication element to Disabled. For backward compatibility, Amazon S3 continues to support the XML V1 replication configuration.

For information about enabling versioning on a bucket, see Using Versioning.

By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

Handling Replication of Encrypted Objects

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

For information on PutBucketReplication errors, see List of replication-related error codes.

The following operations are related to PutBucketReplication:
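A hedged sketch of a Filter-based (XML V2) replication configuration using the aws-sdk-go v1 client; the bucket names, role ARN, and prefix are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The caller also needs iam:PassRole for the replication role.
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("source-bucket"),
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::111122223333:role/replication-role"),
			Rules: []*s3.ReplicationRule{{
				ID:       aws.String("replicate-logs"),
				Status:   aws.String("Enabled"),
				Priority: aws.Int64(1),
				// A Filter-based rule must also carry a DeleteMarkerReplication element.
				Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String("Disabled")},
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::destination-bucket"),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```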

", "PutBucketRequestPayment": "

Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.

The following operations are related to PutBucketRequestPayment:
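A short aws-sdk-go v1 sketch of enabling Requester Pays on a bucket; the bucket name is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Payer may be "Requester" or "BucketOwner".
	_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("examplebucket"),
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String("Requester"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```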

", @@ -89,7 +93,7 @@ "PutObjectRetention": "

Places an Object Retention configuration on an object.

This action is not supported by Amazon S3 on Outposts.

Related Resources
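A minimal aws-sdk-go v1 sketch of placing an Object Retention configuration; the bucket, key, mode, and retention period are illustrative, and the bucket is assumed to have Object Lock enabled:

```go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Placeholders throughout; this call fails unless Object Lock is enabled on the bucket.
	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("important-record.csv"),
		Retention: &s3.ObjectLockRetention{
			Mode:            aws.String("GOVERNANCE"),
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```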

", "PutObjectTagging": "

Sets the supplied tag-set to an object that already exists in a bucket.

A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.

For restrictions on characters and encodings in tags, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

For information about the Amazon S3 object tagging feature, see Object Tagging.

Special Errors

Related Resources
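A minimal aws-sdk-go v1 sketch of setting the tag set on an existing object; the bucket, key, and tags are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The caller needs s3:PutObjectTagging (and s3:PutObjectVersionTagging
	// when a VersionId is supplied).
	out, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("HappyFace.jpg"),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("demo")},
				{Key: aws.String("owner"), Value: aws.String("team-a")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tagged version:", aws.StringValue(out.VersionId))
}
```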

", "PutPublicAccessBlock": "

Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

Related Resources
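A minimal aws-sdk-go v1 sketch of setting a bucket's PublicAccessBlock configuration; the bucket name is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The caller needs s3:PutBucketPublicAccessBlock. S3 applies the most
	// restrictive combination of these bucket-level settings and the
	// account-level settings.
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("examplebucket"),
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```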

", - "RestoreObject": "

Restores an archived copy of an object back into Amazon S3

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

The following are additional important facts about the select feature:

Restoring Archives

Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To access an archived object, you must first initiate a restore request. This restores a temporary copy of the archived object. In a restore request, you specify the number of days that you want the restored copy to exist. After the specified period, Amazon S3 deletes the temporary copy but the object remains archived in the GLACIER or DEEP_ARCHIVE storage class that object was restored from.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

The time it takes restore jobs to finish depends on which storage class the object is being restored from and which data access tier you specify.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. You upgrade the speed of an in-progress restoration by issuing another restore request to the same object, setting a new Tier request element. When issuing a request to upgrade the restore tier, you must choose a tier that is faster than the tier that the in-progress restore is using. You must not change any other parameters, such as the Days request element. For more information, see Upgrading the Speed of an In-Progress Restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

Special Errors

Related Resources

", + "RestoreObject": "

Restores an archived copy of an object back into Amazon S3.

This action is not supported by Amazon S3 on Outposts.

This action performs the following types of requests:

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

Querying Archives with Select Requests

You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

When making a select request, do the following:

For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

When making a select request, you can also do the following:

The following are additional important facts about the select feature:

Restoring Archives

Objects that you archive to the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering Archive, or S3 Intelligent-Tiering Deep Archive storage classes are not accessible in real time. For objects in Archive Access tier or Deep Archive Access tier you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.

Responses

A successful operation returns either the 200 OK or 202 Accepted status code.

Special Errors

Related Resources
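A hedged aws-sdk-go v1 sketch of initiating a restore and then checking its status with a HEAD request; the bucket, key, retention days, and retrieval tier are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Days sets how long the temporary copy stays available; Tier selects the
	// retrieval speed (Expedited, Standard, or Bulk).
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("archived-report.csv"),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String("Standard"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The x-amz-restore header is surfaced as HeadObjectOutput.Restore.
	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("archived-report.csv"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("restore status:", aws.StringValue(head.Restore))
}
```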

", "SelectObjectContent": "

This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

This action is not supported by Amazon S3 on Outposts.

For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.

Permissions

You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

Working with the Response Body

Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response .

GetObject Support

The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.

Special Errors

For a list of special errors for this operation, see List of SELECT Object Content Error Codes

Related Resources
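A hedged aws-sdk-go v1 sketch of running an S3 Select query and reading the chunked event stream; the bucket, key, and SQL expression are placeholders, and the object is assumed to be an uncompressed CSV file with a header row:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("examplebucket"),
		Key:            aws.String("records.csv"),
		ExpressionType: aws.String("SQL"),
		Expression:     aws.String("SELECT s.name FROM S3Object s WHERE s.city = 'Seattle'"),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String("USE")},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.EventStream.Close()

	// The response arrives as a series of messages; Records events carry the data.
	for event := range out.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}
```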

", "UploadPart": "

Uploads a part in a multipart upload.

In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.

To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.

If the upload request is signed with Signature Version 4, then Amazon S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).

Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.

For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service Developer Guide .

For information on the permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.

You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.

Special Errors

Related Resources
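A minimal aws-sdk-go v1 sketch of the multipart upload flow (initiate, upload a part, complete); the bucket, key, and part data are placeholders, and a single short part is used only to keep the sketch small:

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key := aws.String("examplebucket"), aws.String("large-object.bin")

	// Initiate the multipart upload to obtain the upload ID.
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
	if err != nil {
		log.Fatal(err)
	}

	// In a real upload, each part except the last must be at least 5 MB.
	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("example part data")),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Complete (or abort) the upload so the parts stop accruing storage charges.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```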

", "UploadPartCopy": "

Uploads a part by copying data from an existing object as the data source. You specify the data source by adding the request header x-amz-copy-source to your request and a byte range by adding the request header x-amz-copy-source-range to your request.

The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service Developer Guide.

Instead of using an existing object as part data, you might use the UploadPart operation and provide data in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.

For more information about using the UploadPartCopy operation, see the following:

Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:

Versioning

If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.

You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:

x-amz-copy-source: /bucket/object?versionId=version id

Special Errors

Related Resources
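A minimal aws-sdk-go v1 sketch of copying a byte range from an existing object into a part; the bucket names, keys, upload ID, and range are placeholders, and the multipart upload is assumed to have been initiated already:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("destination-bucket"),
		Key:             aws.String("assembled-object"),
		UploadId:        aws.String("EXAMPLE-UPLOAD-ID"),
		PartNumber:      aws.Int64(1),
		CopySource:      aws.String("source-bucket/source-object"),
		CopySourceRange: aws.String("bytes=0-5242879"), // first 5 MB of the source
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("copied part ETag:", aws.StringValue(out.CopyPartResult.ETag))
}
```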

" @@ -167,7 +171,7 @@ "DeleteBucketInventoryConfigurationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "DeleteBucketLifecycleRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "DeleteBucketMetricsConfigurationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "DeleteBucketOwnershipControlsRequest$ExpectedBucketOwner": null, + "DeleteBucketOwnershipControlsRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "DeleteBucketPolicyRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "DeleteBucketReplicationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "DeleteBucketRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", @@ -190,7 +194,7 @@ "GetBucketLoggingRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "GetBucketMetricsConfigurationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "GetBucketNotificationConfigurationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "GetBucketOwnershipControlsRequest$ExpectedBucketOwner": null, + "GetBucketOwnershipControlsRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "GetBucketPolicyRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "GetBucketPolicyStatusRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "GetBucketReplicationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", @@ -229,7 +233,7 @@ "PutBucketMetricsConfigurationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "PutBucketNotificationConfigurationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "PutBucketNotificationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "PutBucketOwnershipControlsRequest$ExpectedBucketOwner": null, + "PutBucketOwnershipControlsRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "PutBucketPolicyRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "PutBucketReplicationRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "PutBucketRequestPaymentRequest$ExpectedBucketOwner": "

The account id of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.
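A short aws-sdk-go v1 sketch of passing ExpectedBucketOwner on a bucket-level request; the bucket name and account ID are placeholders, and any other operation that accepts the field works the same way:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// If "examplebucket" is owned by a different account than the one given,
	// the request fails with HTTP 403 (Access Denied).
	_, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket:              aws.String("examplebucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```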

", @@ -345,6 +349,12 @@ "AnalyticsS3BucketDestination$Format": "

Specifies the file format used when exporting data to Amazon S3.

" } }, + "ArchiveStatus": { + "base": null, + "refs": { + "HeadObjectOutput$ArchiveStatus": "

The archive state of the head object.

" + } + }, "Body": { "base": null, "refs": { @@ -425,6 +435,7 @@ "DeleteBucketAnalyticsConfigurationRequest$Bucket": "

The name of the bucket from which an analytics configuration is deleted.

", "DeleteBucketCorsRequest$Bucket": "

Specifies the bucket whose cors configuration is being deleted.

", "DeleteBucketEncryptionRequest$Bucket": "

The name of the bucket containing the server-side encryption configuration to delete.

", + "DeleteBucketIntelligentTieringConfigurationRequest$Bucket": "

The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

", "DeleteBucketInventoryConfigurationRequest$Bucket": "

The name of the bucket containing the inventory configuration to delete.

", "DeleteBucketLifecycleRequest$Bucket": "

The bucket name of the lifecycle to delete.

", "DeleteBucketMetricsConfigurationRequest$Bucket": "

The name of the bucket containing the metrics configuration to delete.

", @@ -444,6 +455,7 @@ "GetBucketAnalyticsConfigurationRequest$Bucket": "

The name of the bucket from which an analytics configuration is retrieved.

", "GetBucketCorsRequest$Bucket": "

The bucket name for which to get the cors configuration.

", "GetBucketEncryptionRequest$Bucket": "

The name of the bucket from which the server-side encryption configuration is retrieved.

", + "GetBucketIntelligentTieringConfigurationRequest$Bucket": "

The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

", "GetBucketInventoryConfigurationRequest$Bucket": "

The name of the bucket containing the inventory configuration to retrieve.

", "GetBucketLifecycleConfigurationRequest$Bucket": "

The name of the bucket for which to get the lifecycle information.

", "GetBucketLifecycleRequest$Bucket": "

The name of the bucket for which to get the lifecycle information.

", @@ -471,6 +483,7 @@ "HeadObjectRequest$Bucket": "

The name of the bucket containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "InventoryS3BucketDestination$Bucket": "

The Amazon Resource Name (ARN) of the bucket where inventory results will be published.

", "ListBucketAnalyticsConfigurationsRequest$Bucket": "

The name of the bucket from which analytics configurations are retrieved.

", + "ListBucketIntelligentTieringConfigurationsRequest$Bucket": "

The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

", "ListBucketInventoryConfigurationsRequest$Bucket": "

The name of the bucket containing the inventory configurations to retrieve.

", "ListBucketMetricsConfigurationsRequest$Bucket": "

The name of the bucket containing the metrics configurations to retrieve.

", "ListMultipartUploadsOutput$Bucket": "

The name of the bucket to which the multipart upload was initiated.

", @@ -488,6 +501,7 @@ "PutBucketAnalyticsConfigurationRequest$Bucket": "

The name of the bucket to which an analytics configuration is stored.

", "PutBucketCorsRequest$Bucket": "

Specifies the bucket impacted by the cors configuration.

", "PutBucketEncryptionRequest$Bucket": "

Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

", + "PutBucketIntelligentTieringConfigurationRequest$Bucket": "

The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.

", "PutBucketInventoryConfigurationRequest$Bucket": "

The name of the bucket where the inventory configuration will be stored.

", "PutBucketLifecycleConfigurationRequest$Bucket": "

The name of the bucket for which to set the configuration.

", "PutBucketLifecycleRequest$Bucket": "

", @@ -509,7 +523,7 @@ "PutObjectRetentionRequest$Bucket": "

The bucket name that contains the object you want to apply this Object Retention configuration to.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

", "PutObjectTaggingRequest$Bucket": "

The bucket name containing the object.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "PutPublicAccessBlockRequest$Bucket": "

The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to set.

", - "RestoreObjectRequest$Bucket": "

The bucket name or containing the object to restore.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", + "RestoreObjectRequest$Bucket": "

The bucket name containing the object to restore.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", "S3Location$BucketName": "

The name of the bucket where the restore results will be placed.

", "SelectObjectContentRequest$Bucket": "

The S3 bucket.

", "UploadPartCopyRequest$Bucket": "

The bucket name.

When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.

When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.

", @@ -915,7 +929,7 @@ "LifecycleExpiration$Days": "

Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.

", "NoncurrentVersionExpiration$NoncurrentDays": "

Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates When an Object Became Noncurrent in the Amazon Simple Storage Service Developer Guide.

", "NoncurrentVersionTransition$NoncurrentDays": "

Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent in the Amazon Simple Storage Service Developer Guide.

", - "RestoreRequest$Days": "

Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.

", + "RestoreRequest$Days": "

Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.

The Days element is required for regular restores, and must not be provided for select requests.

", "Transition$Days": "

Indicates the number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer.

" } }, @@ -952,6 +966,11 @@ "refs": { } }, + "DeleteBucketIntelligentTieringConfigurationRequest": { + "base": null, + "refs": { + } + }, "DeleteBucketInventoryConfigurationRequest": { "base": null, "refs": { @@ -1013,7 +1032,7 @@ } }, "DeleteMarkerReplication": { - "base": "

Specifies whether Amazon S3 replicates the delete markers. If you specify a Filter, you must specify this element. However, in the latest version of replication configuration (when Filter is specified), Amazon S3 doesn't replicate delete markers. Therefore, the DeleteMarkerReplication element can contain only <Status>Disabled</Status>. For an example configuration, see Basic Rule Configuration.

If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, Amazon S3 handled replication of delete markers differently. For more information, see Backward Compatibility.

", + "base": "

Specifies whether Amazon S3 replicates delete markers. If you specify a Filter in your replication configuration, you must also include a DeleteMarkerReplication element. If your Filter includes a Tag element, the DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does not support replicating delete markers for tag-based rules. For an example configuration, see Basic Rule Configuration.

For more information about delete marker replication, see Basic Rule Configuration.

If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.

", "refs": { "ReplicationRule$DeleteMarkerReplication": null } @@ -1021,7 +1040,7 @@ "DeleteMarkerReplicationStatus": { "base": null, "refs": { - "DeleteMarkerReplication$Status": "

Indicates whether to replicate delete markers.

In the current implementation, Amazon S3 doesn't replicate the delete markers. The status must be Disabled.

" + "DeleteMarkerReplication$Status": "

Indicates whether to replicate delete markers.

" } }, "DeleteMarkerVersionId": { @@ -1384,6 +1403,16 @@ "refs": { } }, + "GetBucketIntelligentTieringConfigurationOutput": { + "base": null, + "refs": { + } + }, + "GetBucketIntelligentTieringConfigurationRequest": { + "base": null, + "refs": { + } + }, "GetBucketInventoryConfigurationOutput": { "base": null, "refs": { @@ -1789,6 +1818,65 @@ "SelectParameters$InputSerialization": "

Describes the serialization format of the object.

" } }, + "IntelligentTieringAccessTier": { + "base": null, + "refs": { + "InvalidObjectState$AccessTier": null, + "Tiering$AccessTier": "

S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing frequently and infrequently accessed objects for a list of access tiers in the S3 Intelligent-Tiering storage class.

" + } + }, + "IntelligentTieringAndOperator": { + "base": "

A container for specifying S3 Intelligent-Tiering filters. The filters determine the subset of objects to which the rule applies.

", + "refs": { + "IntelligentTieringFilter$And": "

A conjunction (logical AND) of predicates, which is used in evaluating an S3 Intelligent-Tiering filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.

" + } + }, + "IntelligentTieringConfiguration": { + "base": "

Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket.

For information about the S3 Intelligent-Tiering storage class, see Storage class for automatically optimizing frequently and infrequently accessed objects.

", + "refs": { + "GetBucketIntelligentTieringConfigurationOutput$IntelligentTieringConfiguration": "

Container for S3 Intelligent-Tiering configuration.

", + "IntelligentTieringConfigurationList$member": null, + "PutBucketIntelligentTieringConfigurationRequest$IntelligentTieringConfiguration": "

Container for S3 Intelligent-Tiering configuration.

" + } + }, + "IntelligentTieringConfigurationList": { + "base": null, + "refs": { + "ListBucketIntelligentTieringConfigurationsOutput$IntelligentTieringConfigurationList": "

The list of S3 Intelligent-Tiering configurations for a bucket.

" + } + }, + "IntelligentTieringDays": { + "base": null, + "refs": { + "Tiering$Days": "

The number of days that you want your archived data to be accessible. The minimum number of days specified in the restore request must be at least 90 days. If a smaller value is specified, it will be ignored.

" + } + }, + "IntelligentTieringFilter": { + "base": "

The Filter is used to identify objects that the S3 Intelligent-Tiering configuration applies to.

", + "refs": { + "IntelligentTieringConfiguration$Filter": "

Specifies a bucket filter. The configuration only includes objects that meet the filter's criteria.

" + } + }, + "IntelligentTieringId": { + "base": null, + "refs": { + "DeleteBucketIntelligentTieringConfigurationRequest$Id": "

The ID used to identify the S3 Intelligent-Tiering configuration.

", + "GetBucketIntelligentTieringConfigurationRequest$Id": "

The ID used to identify the S3 Intelligent-Tiering configuration.

", + "IntelligentTieringConfiguration$Id": "

The ID used to identify the S3 Intelligent-Tiering configuration.

", + "PutBucketIntelligentTieringConfigurationRequest$Id": "

The ID used to identify the S3 Intelligent-Tiering configuration.

" + } + }, + "IntelligentTieringStatus": { + "base": null, + "refs": { + "IntelligentTieringConfiguration$Status": "

Specifies the status of the configuration.
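A hedged aws-sdk-go v1 sketch of writing one of these configurations with the PutBucketIntelligentTieringConfiguration operation introduced alongside these shapes; the bucket name, configuration ID, prefix, access-tier names, and day thresholds are illustrative assumptions:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("examplebucket"),
		Id:     aws.String("archive-logs"),
		IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
			Id:     aws.String("archive-logs"),
			Status: aws.String("Enabled"),
			// Only objects under this prefix are covered by the configuration.
			Filter: &s3.IntelligentTieringFilter{Prefix: aws.String("logs/")},
			Tierings: []*s3.Tiering{
				{AccessTier: aws.String("ARCHIVE_ACCESS"), Days: aws.Int64(90)},
				{AccessTier: aws.String("DEEP_ARCHIVE_ACCESS"), Days: aws.Int64(180)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```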

" + } + }, + "InvalidObjectState": { + "base": "

Object is archived and inaccessible until restored.

", + "refs": { + } + }, "InventoryConfiguration": { "base": "

Specifies the inventory configuration for an Amazon S3 bucket. For more information, see GET Bucket inventory in the Amazon Simple Storage Service API Reference.

", "refs": { @@ -1895,6 +1983,7 @@ "base": null, "refs": { "ListBucketAnalyticsConfigurationsOutput$IsTruncated": "

Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

", + "ListBucketIntelligentTieringConfigurationsOutput$IsTruncated": "

Indicates whether the returned list of S3 Intelligent-Tiering configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

", "ListBucketInventoryConfigurationsOutput$IsTruncated": "

Tells whether the returned list of inventory configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken is provided for a subsequent request.

", "ListBucketMetricsConfigurationsOutput$IsTruncated": "

Indicates whether the returned list of metrics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

", "ListMultipartUploadsOutput$IsTruncated": "

Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.

", @@ -2028,6 +2117,16 @@ "refs": { } }, + "ListBucketIntelligentTieringConfigurationsOutput": { + "base": null, + "refs": { + } + }, + "ListBucketIntelligentTieringConfigurationsRequest": { + "base": null, + "refs": { + } + }, "ListBucketInventoryConfigurationsOutput": { "base": null, "refs": { @@ -2224,9 +2323,9 @@ } }, "Metrics": { - "base": "

A container specifying replication metrics-related settings enabling metrics and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified together with a ReplicationTime block.

", + "base": "

A container specifying replication metrics-related settings enabling replication metrics and events.

", "refs": { - "Destination$Metrics": "

A container specifying replication metrics-related settings enabling metrics and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified together with a ReplicationTime block.

" + "Destination$Metrics": "

A container specifying replication metrics-related settings enabling replication metrics and events.

" } }, "MetricsAndOperator": { @@ -2331,6 +2430,7 @@ "base": null, "refs": { "ListBucketAnalyticsConfigurationsOutput$NextContinuationToken": "

NextContinuationToken is sent when isTruncated is true, which indicates that there are more analytics configurations to list. The next request must include this NextContinuationToken. The token is obfuscated and is not a usable value.

", + "ListBucketIntelligentTieringConfigurationsOutput$NextContinuationToken": "

The marker used to continue this S3 Intelligent-Tiering configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.

", "ListBucketInventoryConfigurationsOutput$NextContinuationToken": "

The marker used to continue this inventory configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.

", "ListBucketMetricsConfigurationsOutput$NextContinuationToken": "

The marker used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

", "ListObjectsV2Output$NextContinuationToken": "

NextContinuationToken is sent when isTruncated is true, which means there are more keys in the bucket that can be listed. The next list requests to Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken is obfuscated and is not a real key

" @@ -2574,7 +2674,7 @@ "ObjectLockToken": { "base": null, "refs": { - "PutBucketReplicationRequest$Token": "

", + "PutBucketReplicationRequest$Token": "

A token to allow Object Lock to be enabled for an existing bucket.

", "PutObjectLockConfigurationRequest$Token": "

A token to allow Object Lock to be enabled for an existing bucket.

" } }, @@ -2773,6 +2873,8 @@ "AnalyticsFilter$Prefix": "

The prefix to use when evaluating an analytics filter.

", "AnalyticsS3BucketDestination$Prefix": "

The prefix to use when exporting data. The prefix is prepended to all results.

", "CommonPrefix$Prefix": "

Container for the specified common prefix.

", + "IntelligentTieringAndOperator$Prefix": "

An object key name prefix that identifies the subset of objects to which the configuration applies.

", + "IntelligentTieringFilter$Prefix": "

An object key name prefix that identifies the subset of objects to which the rule applies.

", "InventoryFilter$Prefix": "

The prefix that an object must have to be included in the inventory results.

", "InventoryS3BucketDestination$Prefix": "

The prefix that is prepended to all inventory results.

", "LifecycleRule$Prefix": "

Prefix identifying one or more objects to which the rule applies. This is No longer used; use Filter instead.

", @@ -2797,7 +2899,7 @@ "Priority": { "base": null, "refs": { - "ReplicationRule$Priority": "

The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:

For more information, see Replication in the Amazon Simple Storage Service Developer Guide.

" + "ReplicationRule$Priority": "

The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:

For more information, see Replication in the Amazon Simple Storage Service Developer Guide.

" } }, "Progress": { @@ -2851,6 +2953,11 @@ "refs": { } }, + "PutBucketIntelligentTieringConfigurationRequest": { + "base": null, + "refs": { + } + }, "PutBucketInventoryConfigurationRequest": { "base": null, "refs": { @@ -3503,7 +3610,7 @@ "PublicAccessBlockConfiguration$BlockPublicAcls": "

Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE causes the following behavior:

Enabling this setting doesn't affect existing policies or ACLs.

", "PublicAccessBlockConfiguration$IgnorePublicAcls": "

Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.

Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.

", "PublicAccessBlockConfiguration$BlockPublicPolicy": "

Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.

Enabling this setting doesn't affect existing bucket policies.

", - "PublicAccessBlockConfiguration$RestrictPublicBuckets": "

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a public policy.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

" + "PublicAccessBlockConfiguration$RestrictPublicBuckets": "

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS service principals and authorized users within this account if the bucket has a public policy.

Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.

" } }, "Size": { @@ -3565,6 +3672,7 @@ "Destination$StorageClass": "

The storage class to use when replicating objects, such as S3 Standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.

For valid values, see the StorageClass element of the PUT Bucket replication action in the Amazon Simple Storage Service API Reference.

", "GetObjectOutput$StorageClass": "

Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

", "HeadObjectOutput$StorageClass": "

Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.

For more information, see Storage Classes.

", + "InvalidObjectState$StorageClass": null, "ListPartsOutput$StorageClass": "

Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded object.

", "MultipartUpload$StorageClass": "

The class of storage used to store the object.

", "PutObjectRequest$StorageClass": "

By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.

", @@ -3599,6 +3707,7 @@ "base": "

A container of a key value name pair.

", "refs": { "AnalyticsFilter$Tag": "

The tag to use when evaluating an analytics filter.

", + "IntelligentTieringFilter$Tag": null, "LifecycleRuleFilter$Tag": "

This tag must exist in the object's tag set in order for the rule to apply.

", "MetricsFilter$Tag": "

The tag used when evaluating a metrics filter.

", "ReplicationRuleFilter$Tag": "

A container for specifying a tag key and value.

The rule applies only to objects that have the tag in their tag set.

", @@ -3617,6 +3726,7 @@ "AnalyticsAndOperator$Tags": "

The list of tags to use when evaluating an AND predicate.

", "GetBucketTaggingOutput$TagSet": "

Contains the tag set.

", "GetObjectTaggingOutput$TagSet": "

Contains the tag set.

", + "IntelligentTieringAndOperator$Tags": "

All of these tags must exist in the object's tag set in order for the configuration to apply.

", "LifecycleRuleAndOperator$Tags": "

All of these tags must exist in the object's tag set in order for the rule to apply.

", "MetricsAndOperator$Tags": "

The list of tags used when evaluating an AND predicate.

", "ReplicationRuleAndOperator$Tags": "

An array of tags containing key and value pairs.

", @@ -3672,8 +3782,20 @@ "Tier": { "base": null, "refs": { - "GlacierJobParameters$Tier": "

S3 Glacier retrieval tier at which the restore will be processed.

", - "RestoreRequest$Tier": "

S3 Glacier retrieval tier at which the restore will be processed.

" + "GlacierJobParameters$Tier": "

Retrieval tier at which the restore will be processed.

", + "RestoreRequest$Tier": "

Retrieval tier at which the restore will be processed.

" + } + }, + "Tiering": { + "base": "

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead.

", + "refs": { + "TieringList$member": null + } + }, + "TieringList": { + "base": null, + "refs": { + "IntelligentTieringConfiguration$Tierings": "

Specifies the S3 Intelligent-Tiering storage class tier of the configuration.

" } }, "Token": { @@ -3681,6 +3803,8 @@ "refs": { "ListBucketAnalyticsConfigurationsOutput$ContinuationToken": "

The marker that is used as a starting point for this analytics configuration list response. This value is present if it was sent in the request.

", "ListBucketAnalyticsConfigurationsRequest$ContinuationToken": "

The ContinuationToken that represents a placeholder from where this request should begin.

", + "ListBucketIntelligentTieringConfigurationsOutput$ContinuationToken": "

The ContinuationToken that represents a placeholder from where this request should begin.

", + "ListBucketIntelligentTieringConfigurationsRequest$ContinuationToken": "

The ContinuationToken that represents a placeholder from where this request should begin.

", "ListBucketInventoryConfigurationsOutput$ContinuationToken": "

If sent in the request, the marker that is used as a starting point for this inventory configuration list response.

", "ListBucketInventoryConfigurationsRequest$ContinuationToken": "

The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

", "ListBucketMetricsConfigurationsOutput$ContinuationToken": "

The marker that is used as a starting point for this metrics configuration list response. This value is present if it was sent in the request.

", diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index 57e77af1def..85043f6646a 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -257,8 +257,10 @@ "DeleteObject": [ { "input": { - "Bucket": "ExampleBucket", - "Key": "HappyFace.jpg" + "Bucket": "examplebucket", + "Key": "objectkey.jpg" + }, + "output": { }, "comments": { "input": { @@ -266,16 +268,14 @@ "output": { } }, - "description": "The following example deletes an object from a non-versioned bucket.", - "id": "to-delete-an-object-from-a-non-versioned-bucket-1481588533089", - "title": "To delete an object (from a non-versioned bucket)" + "description": "The following example deletes an object from an S3 bucket.", + "id": "to-delete-an-object-1472850136595", + "title": "To delete an object" }, { "input": { - "Bucket": "examplebucket", - "Key": "objectkey.jpg" - }, - "output": { + "Bucket": "ExampleBucket", + "Key": "HappyFace.jpg" }, "comments": { "input": { @@ -283,9 +283,9 @@ "output": { } }, - "description": "The following example deletes an object from an S3 bucket.", - "id": "to-delete-an-object-1472850136595", - "title": "To delete an object" + "description": "The following example deletes an object from a non-versioned bucket.", + "id": "to-delete-an-object-from-a-non-versioned-bucket-1481588533089", + "title": "To delete an object (from a non-versioned bucket)" } ], "DeleteObjectTagging": [ @@ -728,17 +728,18 @@ { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "SampleFile.txt", + "Range": "bytes=0-9" }, "output": { "AcceptRanges": "bytes", - "ContentLength": "3191", - "ContentType": "image/jpeg", - "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "LastModified": "Thu, 15 Dec 2016 01:19:41 GMT", + "ContentLength": "10", + "ContentRange": "bytes 0-9/43", + "ContentType": "text/plain", + "ETag": "\"0d94420ffd0bc68cd3d152506b97a9cc\"", + "LastModified": "Thu, 09 Oct 2014 22:57:28 GMT", "Metadata": { }, - "TagCount": 2, "VersionId": "null" }, "comments": { @@ -747,25 +748,24 @@ "output": { } }, - "description": "The following example retrieves an object for an S3 bucket.", - "id": "to-retrieve-an-object-1481827837012", - "title": "To retrieve an object" + "description": "The following example retrieves an object for an S3 bucket. The request specifies the range header to retrieve a specific byte range.", + "id": "to-retrieve-a-byte-range-of-an-object--1481832674603", + "title": "To retrieve a byte range of an object " }, { "input": { "Bucket": "examplebucket", - "Key": "SampleFile.txt", - "Range": "bytes=0-9" + "Key": "HappyFace.jpg" }, "output": { "AcceptRanges": "bytes", - "ContentLength": "10", - "ContentRange": "bytes 0-9/43", - "ContentType": "text/plain", - "ETag": "\"0d94420ffd0bc68cd3d152506b97a9cc\"", - "LastModified": "Thu, 09 Oct 2014 22:57:28 GMT", + "ContentLength": "3191", + "ContentType": "image/jpeg", + "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", + "LastModified": "Thu, 15 Dec 2016 01:19:41 GMT", "Metadata": { }, + "TagCount": 2, "VersionId": "null" }, "comments": { @@ -774,9 +774,9 @@ "output": { } }, - "description": "The following example retrieves an object for an S3 bucket. 
The request specifies the range header to retrieve a specific byte range.", - "id": "to-retrieve-a-byte-range-of-an-object--1481832674603", - "title": "To retrieve a byte range of an object " + "description": "The following example retrieves an object for an S3 bucket.", + "id": "to-retrieve-an-object-1481827837012", + "title": "To retrieve an object" } ], "GetObjectAcl": [ @@ -840,17 +840,20 @@ { "input": { "Bucket": "examplebucket", - "Key": "exampleobject", - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "Key": "HappyFace.jpg" }, "output": { "TagSet": [ { - "Key": "Key1", - "Value": "Value1" + "Key": "Key4", + "Value": "Value4" + }, + { + "Key": "Key3", + "Value": "Value3" } ], - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "VersionId": "null" }, "comments": { "input": { @@ -858,27 +861,24 @@ "output": { } }, - "description": "The following example retrieves tag set of an object. The request specifies object version.", - "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663", - "title": "To retrieve tag set of a specific object version" + "description": "The following example retrieves tag set of an object.", + "id": "to-retrieve-tag-set-of-an-object-1481833847896", + "title": "To retrieve tag set of an object" }, { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "exampleobject", + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "output": { "TagSet": [ { - "Key": "Key4", - "Value": "Value4" - }, - { - "Key": "Key3", - "Value": "Value3" + "Key": "Key1", + "Value": "Value1" } ], - "VersionId": "null" + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "comments": { "input": { @@ -886,9 +886,9 @@ "output": { } }, - "description": "The following example retrieves tag set of an object.", - "id": "to-retrieve-tag-set-of-an-object-1481833847896", - "title": "To retrieve tag set of an object" + "description": "The following example retrieves tag set of an object. The request specifies object version.", + "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663", + "title": "To retrieve tag set of a specific object version" } ], "GetObjectTorrent": [ @@ -1565,26 +1565,6 @@ } ], "PutObject": [ - { - "input": { - "Body": "HappyFace.jpg", - "Bucket": "examplebucket", - "Key": "HappyFace.jpg" - }, - "output": { - "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" - }, - "comments": { - "input": { - }, - "output": { - } - }, - "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", - "id": "to-upload-an-object-1481760101010", - "title": "To upload an object" - }, { "input": { "Body": "filetoupload", @@ -1613,14 +1593,11 @@ "input": { "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "exampleobject", - "ServerSideEncryption": "AES256", - "Tagging": "key1=value1&key2=value2" + "Key": "objectkey" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256", - "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" + "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" }, "comments": { "input": { @@ -1628,9 +1605,9 @@ "output": { } }, - "description": "The following example uploads and object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. 
If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", - "title": "To upload an object and specify server-side encryption and object tags" + "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-create-an-object-1483147613675", + "title": "To create an object." }, { "input": { @@ -1655,16 +1632,16 @@ }, { "input": { - "Body": "HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", + "Key": "exampleobject", "ServerSideEncryption": "AES256", - "StorageClass": "STANDARD_IA" + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", "ServerSideEncryption": "AES256", - "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" + "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" }, "comments": { "input": { @@ -1672,9 +1649,9 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", - "id": "to-upload-an-object-(specify-optional-headers)", - "title": "To upload an object (specify optional headers)" + "description": "The following example uploads and object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", + "title": "To upload an object and specify server-side encryption and object tags" }, { "input": { @@ -1699,13 +1676,16 @@ }, { "input": { - "Body": "filetoupload", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "objectkey" + "Key": "HappyFace.jpg", + "ServerSideEncryption": "AES256", + "StorageClass": "STANDARD_IA" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" + "ServerSideEncryption": "AES256", + "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" }, "comments": { "input": { @@ -1713,9 +1693,29 @@ "output": { } }, - "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-create-an-object-1483147613675", - "title": "To create an object." + "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", + "id": "to-upload-an-object-(specify-optional-headers)", + "title": "To upload an object (specify optional headers)" + }, + { + "input": { + "Body": "HappyFace.jpg", + "Bucket": "examplebucket", + "Key": "HappyFace.jpg" + }, + "output": { + "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", + "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. 
S3 returns VersionId of the newly created object.", + "id": "to-upload-an-object-1481760101010", + "title": "To upload an object" } ], "PutObjectAcl": [ diff --git a/models/apis/ssm/2014-11-06/api-2.json b/models/apis/ssm/2014-11-06/api-2.json index f9cea6a2047..c5cdf7ee735 100644 --- a/models/apis/ssm/2014-11-06/api-2.json +++ b/models/apis/ssm/2014-11-06/api-2.json @@ -2390,7 +2390,8 @@ "StartTimeBefore", "StartTimeAfter", "AutomationType", - "TagKey" + "TagKey", + "TargetResourceGroup" ] }, "AutomationExecutionFilterList":{ diff --git a/models/apis/ssm/2014-11-06/docs-2.json b/models/apis/ssm/2014-11-06/docs-2.json index 2c58216ffb1..64d6d281424 100644 --- a/models/apis/ssm/2014-11-06/docs-2.json +++ b/models/apis/ssm/2014-11-06/docs-2.json @@ -391,13 +391,13 @@ "AssociationFilterKey": { "base": null, "refs": { - "AssociationFilter$key": "

The name of the filter.

InstanceId has been deprecated.

" + "AssociationFilter$key": "

The name of the filter.

" } }, "AssociationFilterList": { "base": null, "refs": { - "ListAssociationsRequest$AssociationFilterList": "

One or more filters. Use a filter to return a more specific list of results.

Filtering associations using the InstanceID attribute only returns legacy associations created using the InstanceID attribute. Associations targeting the instance that are part of the Target Attributes ResourceGroup or Tags are not returned.

" + "ListAssociationsRequest$AssociationFilterList": "

One or more filters. Use a filter to return a more specific list of results.

" } }, "AssociationFilterOperatorType": { diff --git a/models/apis/storagegateway/2013-06-30/api-2.json b/models/apis/storagegateway/2013-06-30/api-2.json index f62e764935b..041fdc9317e 100644 --- a/models/apis/storagegateway/2013-06-30/api-2.json +++ b/models/apis/storagegateway/2013-06-30/api-2.json @@ -404,6 +404,19 @@ {"shape":"InternalServerError"} ] }, + "DescribeBandwidthRateLimitSchedule":{ + "name":"DescribeBandwidthRateLimitSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBandwidthRateLimitScheduleInput"}, + "output":{"shape":"DescribeBandwidthRateLimitScheduleOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, "DescribeCache":{ "name":"DescribeCache", "http":{ @@ -950,6 +963,19 @@ {"shape":"InternalServerError"} ] }, + "UpdateBandwidthRateLimitSchedule":{ + "name":"UpdateBandwidthRateLimitSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateBandwidthRateLimitScheduleInput"}, + "output":{"shape":"UpdateBandwidthRateLimitScheduleOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, "UpdateChapCredentials":{ "name":"UpdateChapCredentials", "http":{ @@ -1286,6 +1312,31 @@ "type":"long", "min":102400 }, + "BandwidthRateLimitInterval":{ + "type":"structure", + "required":[ + "StartHourOfDay", + "StartMinuteOfHour", + "EndHourOfDay", + "EndMinuteOfHour", + "DaysOfWeek" + ], + "members":{ + "StartHourOfDay":{"shape":"HourOfDay"}, + "StartMinuteOfHour":{"shape":"MinuteOfHour"}, + "EndHourOfDay":{"shape":"HourOfDay"}, + "EndMinuteOfHour":{"shape":"MinuteOfHour"}, + "DaysOfWeek":{"shape":"DaysOfWeek"}, + "AverageUploadRateLimitInBitsPerSec":{"shape":"BandwidthUploadRateLimit"}, + "AverageDownloadRateLimitInBitsPerSec":{"shape":"BandwidthDownloadRateLimit"} + } + }, + "BandwidthRateLimitIntervals":{ + "type":"list", + "member":{"shape":"BandwidthRateLimitInterval"}, + "max":20, + "min":0 + }, "BandwidthType":{ "type":"string", "max":25, @@ -1649,6 +1700,12 @@ "max":6, "min":0 }, + "DaysOfWeek":{ + "type":"list", + "member":{"shape":"DayOfWeek"}, + "max":7, + "min":1 + }, "DeleteAutomaticTapeCreationPolicyInput":{ "type":"structure", "required":["GatewayARN"], @@ -1830,6 +1887,20 @@ "AverageDownloadRateLimitInBitsPerSec":{"shape":"BandwidthDownloadRateLimit"} } }, + "DescribeBandwidthRateLimitScheduleInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeBandwidthRateLimitScheduleOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "BandwidthRateLimitIntervals":{"shape":"BandwidthRateLimitIntervals"} + } + }, "DescribeCacheInput":{ "type":"structure", "required":["GatewayARN"], @@ -3336,6 +3407,23 @@ "GatewayARN":{"shape":"GatewayARN"} } }, + "UpdateBandwidthRateLimitScheduleInput":{ + "type":"structure", + "required":[ + "GatewayARN", + "BandwidthRateLimitIntervals" + ], + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "BandwidthRateLimitIntervals":{"shape":"BandwidthRateLimitIntervals"} + } + }, + "UpdateBandwidthRateLimitScheduleOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "UpdateChapCredentialsInput":{ "type":"structure", "required":[ diff --git a/models/apis/storagegateway/2013-06-30/docs-2.json b/models/apis/storagegateway/2013-06-30/docs-2.json index 297942b9187..ff12afbe2b2 100644 --- 
a/models/apis/storagegateway/2013-06-30/docs-2.json +++ b/models/apis/storagegateway/2013-06-30/docs-2.json @@ -32,6 +32,7 @@ "DeleteVolume": "

Deletes the specified storage volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is only supported in the cached volume and stored volume types. For stored volume gateways, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

Before you delete a volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

", "DescribeAvailabilityMonitorTest": "

Returns information about the most recent High Availability monitoring test that was performed on the host in a cluster. If a test isn't performed, the status and start time in the response would be null.

", "DescribeBandwidthRateLimit": "

Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume, and tape gateway types.

This operation returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

", + "DescribeBandwidthRateLimitSchedule": "

Returns information about the bandwidth rate limit schedule of a gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. This operation is supported only in the volume and tape gateway types.

A bandwidth rate limit schedule consists of one or more bandwidth rate limit intervals. A bandwidth rate limit interval defines a period of time on one or more days of the week, during which bandwidth rate limits are specified for uploading, downloading, or both.

A bandwidth rate limit interval consists of one or more days of the week, a start hour and minute, an end hour and minute, and bandwidth rate limits for uploading and downloading.

If no bandwidth rate limit schedule intervals are set for the gateway, this operation returns an empty response. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.
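
A minimal Go sketch of calling this new operation with the SDK (the gateway ARN is a placeholder and error handling is abbreviated):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/storagegateway"
    )

    func main() {
        // Fetch and print the gateway's bandwidth rate limit schedule.
        svc := storagegateway.New(session.Must(session.NewSession()))

        out, err := svc.DescribeBandwidthRateLimitSchedule(&storagegateway.DescribeBandwidthRateLimitScheduleInput{
            GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
        })
        if err != nil {
            log.Fatal(err)
        }

        // An empty BandwidthRateLimitIntervals slice means no schedule is configured.
        for _, interval := range out.BandwidthRateLimitIntervals {
            fmt.Println(interval)
        }
    }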

", "DescribeCache": "

Returns information about the cache of a gateway. This operation is only supported in the cached volume, tape, and file gateway types.

The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.

", "DescribeCachediSCSIVolumes": "

Returns a description of the gateway volumes specified in the request. This operation is only supported in the cached volume gateway types.

The list of gateway volumes in the request must be from one gateway. In the response, AWS Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).

", "DescribeChapCredentials": "

Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials information for a specified iSCSI target, one for each target-initiator pair. This operation is supported in the volume and tape gateway types.

", @@ -74,6 +75,7 @@ "StartGateway": "

Starts a gateway that you previously shut down (see ShutdownGateway). After the gateway starts, you can make other API calls, your applications can read from or write to the gateway's storage volumes, and you can take snapshot backups.

When you make a request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to be ready. You should call DescribeGatewayInformation and check the status before making any additional API calls. For more information, see ActivateGateway.

To specify which gateway to start, use the Amazon Resource Name (ARN) of the gateway in your request.

", "UpdateAutomaticTapeCreationPolicy": "

Updates the automatic tape creation policy of a gateway. Use this to update the policy with a new set of automatic tape creation rules. This is only supported for tape gateways.

By default, there is no automatic tape creation policy.

A gateway can have only one automatic tape creation policy.

", "UpdateBandwidthRateLimit": "

Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume, and tape gateway types.

By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

", + "UpdateBandwidthRateLimitSchedule": "

Updates the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. Use this to initiate or update a gateway's bandwidth rate limit schedule. This operation is supported in the volume and tape gateway types.
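
A minimal sketch of setting a schedule with the SDK, reusing the imports and setup from the DescribeBandwidthRateLimitSchedule sketch above; the gateway ARN and the 50 Mbps figure are placeholders:

    // Throttle uploads to roughly 50 Mbps on weekdays between 08:00 and 17:59.
    svc := storagegateway.New(session.Must(session.NewSession()))

    _, err := svc.UpdateBandwidthRateLimitSchedule(&storagegateway.UpdateBandwidthRateLimitScheduleInput{
        GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
        BandwidthRateLimitIntervals: []*storagegateway.BandwidthRateLimitInterval{
            {
                DaysOfWeek:        []*int64{aws.Int64(1), aws.Int64(2), aws.Int64(3), aws.Int64(4), aws.Int64(5)}, // Monday through Friday
                StartHourOfDay:    aws.Int64(8),
                StartMinuteOfHour: aws.Int64(0),
                EndHourOfDay:      aws.Int64(17),
                EndMinuteOfHour:   aws.Int64(59), // the interval runs to the end of this minute
                AverageUploadRateLimitInBitsPerSec: aws.Int64(50000000),
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }

Per the model added in this release, a request may carry between 0 and 20 intervals.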

", "UpdateChapCredentials": "

Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it. This operation is supported in the volume and tape gateway types.

When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.

", "UpdateGatewayInformation": "

Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

For gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN.

", "UpdateGatewaySoftwareNow": "

Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.

When you make this request, you get a 200 OK success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING state.

A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing your Windows iSCSI settings and Customizing your Linux iSCSI settings, respectively.

", @@ -218,10 +220,24 @@ "BandwidthDownloadRateLimit": { "base": null, "refs": { + "BandwidthRateLimitInterval$AverageDownloadRateLimitInBitsPerSec": "

The average download rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the download rate limit is not set.

", "DescribeBandwidthRateLimitOutput$AverageDownloadRateLimitInBitsPerSec": "

The average download bandwidth rate limit in bits per second. This field does not appear in the response if the download rate limit is not set.

", "UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec": "

The average download bandwidth rate limit in bits per second.

" } }, + "BandwidthRateLimitInterval": { + "base": "

Describes a bandwidth rate limit interval for a gateway. A bandwidth rate limit schedule consists of one or more bandwidth rate limit intervals. A bandwidth rate limit interval defines a period of time on one or more days of the week, during which bandwidth rate limits are specified for uploading, downloading, or both.
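
A sketch of a single interval as a Go struct literal, assuming the aws and storagegateway imports from the earlier sketches; the limit values are placeholders:

    // All day Saturday and Sunday, with both upload and download limits.
    weekend := &storagegateway.BandwidthRateLimitInterval{
        DaysOfWeek:        []*int64{aws.Int64(0), aws.Int64(6)}, // 0 represents Sunday, 6 Saturday
        StartHourOfDay:    aws.Int64(0),
        StartMinuteOfHour: aws.Int64(0),  // the interval begins at the start of the minute
        EndHourOfDay:      aws.Int64(23),
        EndMinuteOfHour:   aws.Int64(59), // the interval ends at the end of the minute
        AverageUploadRateLimitInBitsPerSec:   aws.Int64(100000000),
        AverageDownloadRateLimitInBitsPerSec: aws.Int64(200000000),
    }

    // A slice of up to 20 intervals like this one goes in
    // UpdateBandwidthRateLimitScheduleInput.BandwidthRateLimitIntervals.
    _ = weekend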

", + "refs": { + "BandwidthRateLimitIntervals$member": null + } + }, + "BandwidthRateLimitIntervals": { + "base": null, + "refs": { + "DescribeBandwidthRateLimitScheduleOutput$BandwidthRateLimitIntervals": "

An array that contains the bandwidth rate limit intervals for a tape or volume gateway.

", + "UpdateBandwidthRateLimitScheduleInput$BandwidthRateLimitIntervals": "

An array containing bandwidth rate limit schedule intervals for a gateway. When no bandwidth rate limit intervals have been scheduled, the array is empty.

" + } + }, "BandwidthType": { "base": null, "refs": { @@ -231,6 +247,7 @@ "BandwidthUploadRateLimit": { "base": null, "refs": { + "BandwidthRateLimitInterval$AverageUploadRateLimitInBitsPerSec": "

The average upload rate limit component of the bandwidth rate limit interval, in bits per second. This field does not appear in the response if the upload rate limit is not set.

", "DescribeBandwidthRateLimitOutput$AverageUploadRateLimitInBitsPerSec": "

The average upload bandwidth rate limit in bits per second. This field does not appear in the response if the upload rate limit is not set.

", "UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec": "

The average upload bandwidth rate limit in bits per second.

" } @@ -478,10 +495,17 @@ "DayOfWeek": { "base": null, "refs": { + "DaysOfWeek$member": null, "DescribeMaintenanceStartTimeOutput$DayOfWeek": "

An ordinal number between 0 and 6 that represents the day of the week, where 0 represents Sunday and 6 represents Saturday. The day of week is in the time zone of the gateway.

", "UpdateMaintenanceStartTimeInput$DayOfWeek": "

The day of the week component of the maintenance start time week represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 Saturday.

" } }, + "DaysOfWeek": { + "base": null, + "refs": { + "BandwidthRateLimitInterval$DaysOfWeek": "

The days of the week component of the bandwidth rate limit interval, represented as ordinal numbers from 0 to 6, where 0 represents Sunday and 6 Saturday.

" + } + }, "DeleteAutomaticTapeCreationPolicyInput": { "base": null, "refs": { @@ -608,6 +632,16 @@ "refs": { } }, + "DescribeBandwidthRateLimitScheduleInput": { + "base": null, + "refs": { + } + }, + "DescribeBandwidthRateLimitScheduleOutput": { + "base": null, + "refs": { + } + }, "DescribeCacheInput": { "base": null, "refs": { @@ -1055,6 +1089,8 @@ "DescribeAvailabilityMonitorTestOutput$GatewayARN": null, "DescribeBandwidthRateLimitInput$GatewayARN": null, "DescribeBandwidthRateLimitOutput$GatewayARN": null, + "DescribeBandwidthRateLimitScheduleInput$GatewayARN": null, + "DescribeBandwidthRateLimitScheduleOutput$GatewayARN": null, "DescribeCacheInput$GatewayARN": null, "DescribeCacheOutput$GatewayARN": null, "DescribeGatewayInformationInput$GatewayARN": null, @@ -1108,6 +1144,8 @@ "UpdateAutomaticTapeCreationPolicyOutput$GatewayARN": null, "UpdateBandwidthRateLimitInput$GatewayARN": null, "UpdateBandwidthRateLimitOutput$GatewayARN": null, + "UpdateBandwidthRateLimitScheduleInput$GatewayARN": null, + "UpdateBandwidthRateLimitScheduleOutput$GatewayARN": null, "UpdateGatewayInformationInput$GatewayARN": null, "UpdateGatewayInformationOutput$GatewayARN": null, "UpdateGatewaySoftwareNowInput$GatewayARN": null, @@ -1205,6 +1243,8 @@ "HourOfDay": { "base": null, "refs": { + "BandwidthRateLimitInterval$StartHourOfDay": "

The hour of the day to start the bandwidth rate limit interval.

", + "BandwidthRateLimitInterval$EndHourOfDay": "

The hour of the day to end the bandwidth rate limit interval.

", "DescribeMaintenanceStartTimeOutput$HourOfDay": "

The hour component of the maintenance start time represented as hh, where hh is the hour (0 to 23). The hour of the day is in the time zone of the gateway.

", "DescribeSnapshotScheduleOutput$StartAt": "

The hour of the day at which the snapshot schedule begins represented as hh, where hh is the hour (0 to 23). The hour of the day is in the time zone of the gateway.

", "UpdateMaintenanceStartTimeInput$HourOfDay": "

The hour component of the maintenance start time represented as hh, where hh is the hour (00 to 23). The hour of the day is in the time zone of the gateway.

", @@ -1440,6 +1480,8 @@ "MinuteOfHour": { "base": null, "refs": { + "BandwidthRateLimitInterval$StartMinuteOfHour": "

The minute of the hour to start the bandwidth rate limit interval. The interval begins at the start of that minute. To begin an interval exactly at the start of the hour, use the value 0.

", + "BandwidthRateLimitInterval$EndMinuteOfHour": "

The minute of the hour to end the bandwidth rate limit interval.

The bandwidth rate limit interval ends at the end of the minute. To end an interval at the end of an hour, use the value 59.

", "DescribeMaintenanceStartTimeOutput$MinuteOfHour": "

The minute component of the maintenance start time represented as mm, where mm is the minute (0 to 59). The minute of the hour is in the time zone of the gateway.

", "UpdateMaintenanceStartTimeInput$MinuteOfHour": "

The minute component of the maintenance start time represented as mm, where mm is the minute (00 to 59). The minute of the hour is in the time zone of the gateway.

" } @@ -2131,6 +2173,16 @@ "refs": { } }, + "UpdateBandwidthRateLimitScheduleInput": { + "base": null, + "refs": { + } + }, + "UpdateBandwidthRateLimitScheduleOutput": { + "base": null, + "refs": { + } + }, "UpdateChapCredentialsInput": { "base": "

A JSON object containing one or more of the following fields:

", "refs": { diff --git a/models/apis/storagegateway/2013-06-30/paginators-1.json b/models/apis/storagegateway/2013-06-30/paginators-1.json index 78ba8622c24..921dfb4f541 100644 --- a/models/apis/storagegateway/2013-06-30/paginators-1.json +++ b/models/apis/storagegateway/2013-06-30/paginators-1.json @@ -57,6 +57,12 @@ "output_token": "Marker", "result_key": "Tags" }, + "ListTapePools": { + "input_token": "Marker", + "limit_key": "Limit", + "output_token": "Marker", + "result_key": "PoolInfos" + }, "ListTapes": { "input_token": "Marker", "limit_key": "Limit", diff --git a/service/datasync/api.go b/service/datasync/api.go index 2a4bda27ad4..43276deb712 100644 --- a/service/datasync/api.go +++ b/service/datasync/api.go @@ -2932,6 +2932,96 @@ func (c *DataSync) UpdateTaskWithContext(ctx aws.Context, input *UpdateTaskInput return out, req.Send() } +const opUpdateTaskExecution = "UpdateTaskExecution" + +// UpdateTaskExecutionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTaskExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTaskExecution for more information on using the UpdateTaskExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTaskExecutionRequest method. +// req, resp := client.UpdateTaskExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateTaskExecution +func (c *DataSync) UpdateTaskExecutionRequest(input *UpdateTaskExecutionInput) (req *request.Request, output *UpdateTaskExecutionOutput) { + op := &request.Operation{ + Name: opUpdateTaskExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTaskExecutionInput{} + } + + output = &UpdateTaskExecutionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateTaskExecution API operation for AWS DataSync. +// +// Updates execution of a task. +// +// You can modify bandwidth throttling for a task execution that is running +// or queued. For more information, see Adjusting Bandwidth Throttling for a +// Task Execution (https://docs.aws.amazon.com/datasync/latest/working-with-task-executions.html#adjust-bandwidth-throttling). +// +// The only Option that can be modified by UpdateTaskExecution is BytesPerSecond +// (https://docs.aws.amazon.com/datasync/latest/userguide/API_Options.html#DataSync-Type-Options-BytesPerSecond) . +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation UpdateTaskExecution for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. 
+// +// * InternalException +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateTaskExecution +func (c *DataSync) UpdateTaskExecution(input *UpdateTaskExecutionInput) (*UpdateTaskExecutionOutput, error) { + req, out := c.UpdateTaskExecutionRequest(input) + return out, req.Send() +} + +// UpdateTaskExecutionWithContext is the same as UpdateTaskExecution with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTaskExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) UpdateTaskExecutionWithContext(ctx aws.Context, input *UpdateTaskExecutionInput, opts ...request.Option) (*UpdateTaskExecutionOutput, error) { + req, out := c.UpdateTaskExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // Represents a single entry in a list of agents. AgentListEntry returns an // array that contains a list of agents when the ListAgents operation is called. type AgentListEntry struct { @@ -3510,7 +3600,7 @@ type CreateLocationNfsInput struct { // The path should be such that it can be mounted by other NFS clients in your // network. // - // To see all the paths exported by your NFS server. run "showmount -e nfs-server-name" + // To see all the paths exported by your NFS server, run "showmount -e nfs-server-name" // from an NFS client that has access to your server. You can specify any directory // that appears in the results, and any subdirectory of that directory. Ensure // that the NFS export is accessible without Kerberos authentication. @@ -3828,12 +3918,12 @@ type CreateLocationS3Input struct { _ struct{} `type:"structure"` // If you are using DataSync on an AWS Outpost, specify the Amazon Resource - // Names (ARNs) of the DataSync agents deployed on your AWS Outpost. For more - // information about launching a DataSync agent on an Amazon Outpost, see outposts-agent. + // Names (ARNs) of the DataSync agents deployed on your Outpost. For more information + // about launching a DataSync agent on an AWS Outpost, see outposts-agent. AgentArns []*string `min:"1" type:"list"` - // The Amazon Resource Name (ARN) of the Amazon S3 bucket. If the bucket is - // on an AWS Outpost, this must be an access point ARN. + // The ARN of the Amazon S3 bucket. If the bucket is on an AWS Outpost, this + // must be an access point ARN. // // S3BucketArn is a required field S3BucketArn *string `type:"string" required:"true"` @@ -3853,9 +3943,8 @@ type CreateLocationS3Input struct { // defaults to AWS S3 Outposts. // // For more information about S3 storage classes, see Amazon S3 Storage Classes - // (https://aws.amazon.com/s3/storage-classes/) in the Amazon Simple Storage - // Service Developer Guide. Some storage classes have behaviors that can affect - // your S3 storage cost. For detailed information, see using-storage-classes. + // (http://aws.amazon.com/s3/storage-classes/). Some storage classes have behaviors + // that can affect your S3 storage cost. For detailed information, see using-storage-classes. S3StorageClass *string `type:"string" enum:"S3StorageClass"` // A subdirectory in the Amazon S3 bucket. 
This subdirectory in Amazon S3 is @@ -4172,7 +4261,7 @@ type CreateTaskInput struct { // A list of filter rules that determines which files to exclude from a task. // The list should contain a single filter string that consists of the patterns // to exclude. The patterns are delimited by "|" (that is, a pipe), for example, - // "/folder1|/folder2" + // "/folder1|/folder2". Excludes []*FilterRule `type:"list"` // The name of a task. This value is a text reference that is used to identify @@ -5078,9 +5167,9 @@ func (s *DescribeLocationS3Input) SetLocationArn(v string) *DescribeLocationS3In type DescribeLocationS3Output struct { _ struct{} `type:"structure"` - // If you are using DataSync on an Amazon Outpost, the Amazon Resource Name - // (ARNs) of the EC2 agents deployed on your AWS Outpost. For more information - // about launching a DataSync agent on an Amazon Outpost, see outposts-agent. + // If you are using DataSync on an AWS Outpost, the Amazon Resource Name (ARNs) + // of the EC2 agents deployed on your Outpost. For more information about launching + // a DataSync agent on an AWS Outpost, see outposts-agent. AgentArns []*string `min:"1" type:"list"` // The time that the Amazon S3 bucket location was created. @@ -5101,10 +5190,9 @@ type DescribeLocationS3Output struct { // The Amazon S3 storage class that you chose to store your files in when this // location is used as a task destination. For more information about S3 storage - // classes, see Amazon S3 Storage Classes (https://aws.amazon.com/s3/storage-classes/) - // in the Amazon Simple Storage Service Developer Guide. Some storage classes - // have behaviors that can affect your S3 storage cost. For detailed information, - // see using-storage-classes. + // classes, see Amazon S3 Storage Classes (http://aws.amazon.com/s3/storage-classes/). + // Some storage classes have behaviors that can affect your S3 storage cost. + // For detailed information, see using-storage-classes. S3StorageClass *string `type:"string" enum:"S3StorageClass"` } @@ -7229,13 +7317,13 @@ type TaskExecutionResultDetail struct { // phase. TransferDuration *int64 `type:"long"` - // The status of the TRANSFERRING Phase. + // The status of the TRANSFERRING phase. TransferStatus *string `type:"string" enum:"PhaseStatus"` // The total time in milliseconds that AWS DataSync spent in the VERIFYING phase. VerifyDuration *int64 `type:"long"` - // The status of the VERIFYING Phase. + // The status of the VERIFYING phase. VerifyStatus *string `type:"string" enum:"PhaseStatus"` } @@ -7597,6 +7685,86 @@ func (s UpdateAgentOutput) GoString() string { return s.String() } +type UpdateTaskExecutionInput struct { + _ struct{} `type:"structure"` + + // Represents the options that are available to control the behavior of a StartTaskExecution + // operation. Behavior includes preserving metadata such as user ID (UID), group + // ID (GID), and file permissions, and also overwriting files in the destination, + // data integrity verification, and so on. + // + // A task has a set of default options associated with it. If you don't specify + // an option in StartTaskExecution, the default value is used. You can override + // the defaults options on each task execution by specifying an overriding Options + // value to StartTaskExecution. + // + // Options is a required field + Options *Options `type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the specific task execution that is being + // updated. 
+ // + // TaskExecutionArn is a required field + TaskExecutionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateTaskExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTaskExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTaskExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTaskExecutionInput"} + if s.Options == nil { + invalidParams.Add(request.NewErrParamRequired("Options")) + } + if s.TaskExecutionArn == nil { + invalidParams.Add(request.NewErrParamRequired("TaskExecutionArn")) + } + if s.Options != nil { + if err := s.Options.Validate(); err != nil { + invalidParams.AddNested("Options", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOptions sets the Options field's value. +func (s *UpdateTaskExecutionInput) SetOptions(v *Options) *UpdateTaskExecutionInput { + s.Options = v + return s +} + +// SetTaskExecutionArn sets the TaskExecutionArn field's value. +func (s *UpdateTaskExecutionInput) SetTaskExecutionArn(v string) *UpdateTaskExecutionInput { + s.TaskExecutionArn = &v + return s +} + +type UpdateTaskExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateTaskExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTaskExecutionOutput) GoString() string { + return s.String() +} + // UpdateTaskResponse type UpdateTaskInput struct { _ struct{} `type:"structure"` diff --git a/service/datasync/datasynciface/interface.go b/service/datasync/datasynciface/interface.go index ead86827a4a..a457e047387 100644 --- a/service/datasync/datasynciface/interface.go +++ b/service/datasync/datasynciface/interface.go @@ -198,6 +198,10 @@ type DataSyncAPI interface { UpdateTask(*datasync.UpdateTaskInput) (*datasync.UpdateTaskOutput, error) UpdateTaskWithContext(aws.Context, *datasync.UpdateTaskInput, ...request.Option) (*datasync.UpdateTaskOutput, error) UpdateTaskRequest(*datasync.UpdateTaskInput) (*request.Request, *datasync.UpdateTaskOutput) + + UpdateTaskExecution(*datasync.UpdateTaskExecutionInput) (*datasync.UpdateTaskExecutionOutput, error) + UpdateTaskExecutionWithContext(aws.Context, *datasync.UpdateTaskExecutionInput, ...request.Option) (*datasync.UpdateTaskExecutionOutput, error) + UpdateTaskExecutionRequest(*datasync.UpdateTaskExecutionInput) (*request.Request, *datasync.UpdateTaskExecutionOutput) } var _ DataSyncAPI = (*datasync.DataSync)(nil) diff --git a/service/dynamodb/api.go b/service/dynamodb/api.go index 8db8d545c02..f988e1fbdb3 100644 --- a/service/dynamodb/api.go +++ b/service/dynamodb/api.go @@ -1776,6 +1776,102 @@ func (d *discovererDescribeEndpoints) Handler(r *request.Request) { } } +const opDescribeExport = "DescribeExport" + +// DescribeExportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeExport for more information on using the DescribeExport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportRequest method. +// req, resp := client.DescribeExportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport +func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *request.Request, output *DescribeExportOutput) { + op := &request.Operation{ + Name: opDescribeExport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportInput{} + } + + output = &DescribeExportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExport API operation for Amazon DynamoDB. +// +// Describes an existing table export. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeExport for usage and error information. +// +// Returned Error Types: +// * ExportNotFoundException +// The specified export was not found. +// +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport +func (c *DynamoDB) DescribeExport(input *DescribeExportInput) (*DescribeExportOutput, error) { + req, out := c.DescribeExportRequest(input) + return out, req.Send() +} + +// DescribeExportWithContext is the same as DescribeExport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeExportWithContext(ctx aws.Context, input *DescribeExportInput, opts ...request.Option) (*DescribeExportOutput, error) { + req, out := c.DescribeExportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeGlobalTable = "DescribeGlobalTable" // DescribeGlobalTableRequest generates a "aws/request.Request" representing the @@ -2469,6 +2565,114 @@ func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *Describ return out, req.Send() } +const opExportTableToPointInTime = "ExportTableToPointInTime" + +// ExportTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the ExportTableToPointInTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportTableToPointInTime for more information on using the ExportTableToPointInTime +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportTableToPointInTimeRequest method. +// req, resp := client.ExportTableToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) { + op := &request.Operation{ + Name: opExportTableToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportTableToPointInTimeInput{} + } + + output = &ExportTableToPointInTimeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportTableToPointInTime API operation for Amazon DynamoDB. +// +// Exports table data to an S3 bucket. The table must have point in time recovery +// enabled, and you can export data from any time within the point in time recovery +// window. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ExportTableToPointInTime for usage and error information. +// +// Returned Error Types: +// * TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// * PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. +// +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. +// +// * InvalidExportTimeException +// The specified ExportTime is outside of the point in time recovery window. 
+// +// * ExportConflictException +// There was a conflict when writing to the specified S3 bucket. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) + return out, req.Send() +} + +// ExportTableToPointInTimeWithContext is the same as ExportTableToPointInTime with the addition of +// the ability to pass a context and additional request options. +// +// See ExportTableToPointInTime for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ExportTableToPointInTimeWithContext(ctx aws.Context, input *ExportTableToPointInTimeInput, opts ...request.Option) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetItem = "GetItem" // GetItemRequest generates a "aws/request.Request" representing the @@ -2851,140 +3055,291 @@ func (c *DynamoDB) ListContributorInsightsPagesWithContext(ctx aws.Context, inpu return p.Err() } -const opListGlobalTables = "ListGlobalTables" +const opListExports = "ListExports" -// ListGlobalTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListGlobalTables operation. The "output" return +// ListExportsRequest generates a "aws/request.Request" representing the +// client's request for the ListExports operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListGlobalTables for more information on using the ListGlobalTables +// See ListExports for more information on using the ListExports // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListGlobalTablesRequest method. -// req, resp := client.ListGlobalTablesRequest(params) +// // Example sending a request using the ListExportsRequest method. 
+// req, resp := client.ListExportsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables -func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) { op := &request.Operation{ - Name: opListGlobalTables, + Name: opListExports, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListGlobalTablesInput{} + input = &ListExportsInput{} } - output = &ListGlobalTablesOutput{} + output = &ListExportsOutput{} req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } return } -// ListGlobalTables API operation for Amazon DynamoDB. -// -// Lists all global tables that have a replica in the specified Region. +// ListExports API operation for Amazon DynamoDB. // -// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. +// Lists completed exports within the past 90 days. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ListGlobalTables for usage and error information. +// API operation ListExports for usage and error information. // // Returned Error Types: +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. +// // * InternalServerError // An error occurred on the server side. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables -func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { - req, out := c.ListGlobalTablesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) return out, req.Send() } -// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of +// ListExportsWithContext is the same as ListExports with the addition of // the ability to pass a context and additional request options. // -// See ListGlobalTables for details on how to use this API operation. +// See ListExports for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { - req, out := c.ListGlobalTablesRequest(input) +func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInput, opts ...request.Option) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTables = "ListTables" - -// ListTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListTables operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTables for more information on using the ListTables -// API call, and error handling. +// ListExportsPages iterates over the pages of a ListExports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// See ListExports method for more information on how to use this operation. // +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the ListTablesRequest method. -// req, resp := client.ListTablesRequest(params) +// // Example iterating over at most 3 pages of a ListExports operation. +// pageNum := 0 +// err := client.ListExportsPages(params, +// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error { + return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListExportsPagesWithContext same as ListExportsPages except +// it takes a Context and allows setting request options on the pages. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables -func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListExportsPagesWithContext(ctx aws.Context, input *ListExportsInput, fn func(*ListExportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListExportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListExportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGlobalTables = "ListGlobalTables" + +// ListGlobalTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListGlobalTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGlobalTables for more information on using the ListGlobalTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListGlobalTablesRequest method. +// req, resp := client.ListGlobalTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { + op := &request.Operation{ + Name: opListGlobalTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListGlobalTablesInput{} + } + + output = &ListGlobalTablesOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListGlobalTables API operation for Amazon DynamoDB. +// +// Lists all global tables that have a replica in the specified Region. +// +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListGlobalTables for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + return out, req.Send() +} + +// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListGlobalTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTables = "ListTables" + +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTables for more information on using the ListTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { op := &request.Operation{ Name: opListTables, HTTPMethod: "POST", @@ -8064,7 +8419,7 @@ func (s *ContinuousBackupsUnavailableException) RequestID() string { return s.RespMetadata.RequestID } -// Represents a Contributor Insights summary entry.. +// Represents a Contributor Insights summary entry. type ContributorInsightsSummary struct { _ struct{} `type:"structure"` @@ -9800,6 +10155,70 @@ func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpoints return s } +type DescribeExportInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the export. 
+ // + // ExportArn is a required field + ExportArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeExportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportInput"} + if s.ExportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExportArn")) + } + if s.ExportArn != nil && len(*s.ExportArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExportArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExportArn sets the ExportArn field's value. +func (s *DescribeExportInput) SetExportArn(v string) *DescribeExportInput { + s.ExportArn = &v + return s +} + +type DescribeExportOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the export. + ExportDescription *ExportDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeExportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportOutput) GoString() string { + return s.String() +} + +// SetExportDescription sets the ExportDescription field's value. +func (s *DescribeExportOutput) SetExportDescription(v *ExportDescription) *DescribeExportOutput { + s.ExportDescription = v + return s +} + type DescribeGlobalTableInput struct { _ struct{} `type:"structure"` @@ -10392,71 +10811,576 @@ type ExpectedAttributeValue struct { // not compare to {"NS":["6", "2", "1"]} ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` - // Causes DynamoDB to evaluate the value before attempting a conditional operation: - // - // * If Exists is true, DynamoDB will check to see if that attribute value - // already exists in the table. If it is found, then the operation succeeds. - // If it is not found, the operation fails with a ConditionCheckFailedException. + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // + // * If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the operation succeeds. + // If it is not found, the operation fails with a ConditionCheckFailedException. + // + // * If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the operation succeeds. If the value is found, despite the + // assumption that it does not exist, the operation fails with a ConditionCheckFailedException. + // + // The default setting for Exists is true. If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true, + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // + // * Exists is true but there is no Value to check. (You expect a value to + // exist, but don't specify what that value is.) + // + // * Exists is false but you also provide a Value. (You cannot expect an + // attribute to have a value, while also expecting it not to exist.) + Exists *bool `type:"boolean"` + + // Represents the data for the expected attribute. 
+ // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) + // in the Amazon DynamoDB Developer Guide. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s ExpectedAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpectedAttributeValue) GoString() string { + return s.String() +} + +// SetAttributeValueList sets the AttributeValueList field's value. +func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue { + s.AttributeValueList = v + return s +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue { + s.ComparisonOperator = &v + return s +} + +// SetExists sets the Exists field's value. +func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue { + s.Exists = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue { + s.Value = v + return s +} + +// There was a conflict when writing to the specified S3 bucket. +type ExportConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ExportConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportConflictException) GoString() string { + return s.String() +} + +func newErrorExportConflictException(v protocol.ResponseMetadata) error { + return &ExportConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExportConflictException) Code() string { + return "ExportConflictException" +} + +// Message returns the exception's message. +func (s *ExportConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExportConflictException) OrigErr() error { + return nil +} + +func (s *ExportConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExportConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExportConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Represents the properties of the exported table. +type ExportDescription struct { + _ struct{} `type:"structure"` + + // The billable size of the table export. + BilledSizeBytes *int64 `type:"long"` + + // The client token that was provided for the export task. A client token makes + // calls to ExportTableToPointInTimeInput idempotent, meaning that multiple + // identical calls have the same effect as one single call. + ClientToken *string `type:"string"` + + // The time at which the export task completed. + EndTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the table export. 
+ ExportArn *string `min:"37" type:"string"` + + // The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON + // or ION. + ExportFormat *string `type:"string" enum:"ExportFormat"` + + // The name of the manifest file for the export task. + ExportManifest *string `type:"string"` + + // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or + // FAILED. + ExportStatus *string `type:"string" enum:"ExportStatus"` + + // Point in time from which table data was exported. + ExportTime *time.Time `type:"timestamp"` + + // Status code for the result of the failed export. + FailureCode *string `type:"string"` + + // Export failure reason description. + FailureMessage *string `type:"string"` + + // The number of items exported. + ItemCount *int64 `type:"long"` + + // The name of the Amazon S3 bucket containing the export. + S3Bucket *string `type:"string"` + + // The ID of the AWS account that owns the bucket containing the export. + S3BucketOwner *string `type:"string"` + + // The Amazon S3 bucket prefix used as the file name and path of the exported + // snapshot. + S3Prefix *string `type:"string"` + + // Type of encryption used on the bucket where export data is stored. Valid + // values for S3SseAlgorithm are: + // + // * AES256 - server-side encryption with Amazon S3 managed keys + // + // * KMS - server-side encryption with AWS KMS managed keys + S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` + + // The ID of the AWS KMS managed key used to encrypt the S3 bucket where export + // data is stored (if applicable). + S3SseKmsKeyId *string `min:"1" type:"string"` + + // The time at which the export task began. + StartTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the table that was exported. + TableArn *string `type:"string"` + + // Unique ID of the table that was exported. + TableId *string `type:"string"` +} + +// String returns the string representation +func (s ExportDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportDescription) GoString() string { + return s.String() +} + +// SetBilledSizeBytes sets the BilledSizeBytes field's value. +func (s *ExportDescription) SetBilledSizeBytes(v int64) *ExportDescription { + s.BilledSizeBytes = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportDescription) SetClientToken(v string) *ExportDescription { + s.ClientToken = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ExportDescription) SetEndTime(v time.Time) *ExportDescription { + s.EndTime = &v + return s +} + +// SetExportArn sets the ExportArn field's value. +func (s *ExportDescription) SetExportArn(v string) *ExportDescription { + s.ExportArn = &v + return s +} + +// SetExportFormat sets the ExportFormat field's value. +func (s *ExportDescription) SetExportFormat(v string) *ExportDescription { + s.ExportFormat = &v + return s +} + +// SetExportManifest sets the ExportManifest field's value. +func (s *ExportDescription) SetExportManifest(v string) *ExportDescription { + s.ExportManifest = &v + return s +} + +// SetExportStatus sets the ExportStatus field's value. +func (s *ExportDescription) SetExportStatus(v string) *ExportDescription { + s.ExportStatus = &v + return s +} + +// SetExportTime sets the ExportTime field's value. 
+func (s *ExportDescription) SetExportTime(v time.Time) *ExportDescription { + s.ExportTime = &v + return s +} + +// SetFailureCode sets the FailureCode field's value. +func (s *ExportDescription) SetFailureCode(v string) *ExportDescription { + s.FailureCode = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. +func (s *ExportDescription) SetFailureMessage(v string) *ExportDescription { + s.FailureMessage = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *ExportDescription) SetItemCount(v int64) *ExportDescription { + s.ItemCount = &v + return s +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportDescription) SetS3Bucket(v string) *ExportDescription { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *ExportDescription) SetS3BucketOwner(v string) *ExportDescription { + s.S3BucketOwner = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportDescription) SetS3Prefix(v string) *ExportDescription { + s.S3Prefix = &v + return s +} + +// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. +func (s *ExportDescription) SetS3SseAlgorithm(v string) *ExportDescription { + s.S3SseAlgorithm = &v + return s +} + +// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. +func (s *ExportDescription) SetS3SseKmsKeyId(v string) *ExportDescription { + s.S3SseKmsKeyId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ExportDescription) SetStartTime(v time.Time) *ExportDescription { + s.StartTime = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ExportDescription) SetTableArn(v string) *ExportDescription { + s.TableArn = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *ExportDescription) SetTableId(v string) *ExportDescription { + s.TableId = &v + return s +} + +// The specified export was not found. +type ExportNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ExportNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportNotFoundException) GoString() string { + return s.String() +} + +func newErrorExportNotFoundException(v protocol.ResponseMetadata) error { + return &ExportNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExportNotFoundException) Code() string { + return "ExportNotFoundException" +} + +// Message returns the exception's message. +func (s *ExportNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExportNotFoundException) OrigErr() error { + return nil +} + +func (s *ExportNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExportNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExportNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Summary information about an export task. 
+type ExportSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the export. + ExportArn *string `min:"37" type:"string"` + + // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or + // FAILED. + ExportStatus *string `type:"string" enum:"ExportStatus"` +} + +// String returns the string representation +func (s ExportSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportSummary) GoString() string { + return s.String() +} + +// SetExportArn sets the ExportArn field's value. +func (s *ExportSummary) SetExportArn(v string) *ExportSummary { + s.ExportArn = &v + return s +} + +// SetExportStatus sets the ExportStatus field's value. +func (s *ExportSummary) SetExportStatus(v string) *ExportSummary { + s.ExportStatus = &v + return s +} + +type ExportTableToPointInTimeInput struct { + _ struct{} `type:"structure"` + + // Providing a ClientToken makes the call to ExportTableToPointInTimeInput idempotent, + // meaning that multiple identical calls have the same effect as one single + // call. // - // * If Exists is false, DynamoDB assumes that the attribute value does not - // exist in the table. If in fact the value does not exist, then the assumption - // is valid and the operation succeeds. If the value is found, despite the - // assumption that it does not exist, the operation fails with a ConditionCheckFailedException. + // A client token is valid for 8 hours after the first request that uses it + // is completed. After 8 hours, any request with the same client token is treated + // as a new request. Do not resubmit the same request with the same client token + // for more than 8 hours, or the result might not be idempotent. // - // The default setting for Exists is true. If you supply a Value all by itself, - // DynamoDB assumes the attribute exists: You don't have to set Exists to true, - // because it is implied. + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch + // exception. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON + // or ION. + ExportFormat *string `type:"string" enum:"ExportFormat"` + + // Time in the past from which to export table data. The table export will be + // a snapshot of the table's state at this point in time. + ExportTime *time.Time `type:"timestamp"` + + // The name of the Amazon S3 bucket to export the snapshot to. // - // DynamoDB returns a ValidationException if: + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The ID of the AWS account that owns the bucket the export will be stored + // in. + S3BucketOwner *string `type:"string"` + + // The Amazon S3 bucket prefix to use as the file name and path of the exported + // snapshot. + S3Prefix *string `type:"string"` + + // Type of encryption used on the bucket where export data will be stored. Valid + // values for S3SseAlgorithm are: // - // * Exists is true but there is no Value to check. (You expect a value to - // exist, but don't specify what that value is.) + // * AES256 - server-side encryption with Amazon S3 managed keys // - // * Exists is false but you also provide a Value. (You cannot expect an - // attribute to have a value, while also expecting it not to exist.) 
- Exists *bool `type:"boolean"` + // * KMS - server-side encryption with AWS KMS managed keys + S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` - // Represents the data for the expected attribute. - // - // Each attribute value is described as a name-value pair. The name is the data - // type, and the value is the data itself. + // The ID of the AWS KMS managed key used to encrypt the S3 bucket where export + // data will be stored (if applicable). + S3SseKmsKeyId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) associated with the table to export. // - // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) - // in the Amazon DynamoDB Developer Guide. - Value *AttributeValue `type:"structure"` + // TableArn is a required field + TableArn *string `type:"string" required:"true"` } // String returns the string representation -func (s ExpectedAttributeValue) String() string { +func (s ExportTableToPointInTimeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExpectedAttributeValue) GoString() string { +func (s ExportTableToPointInTimeInput) GoString() string { return s.String() } -// SetAttributeValueList sets the AttributeValueList field's value. -func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue { - s.AttributeValueList = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTableToPointInTimeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTableToPointInTimeInput"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + if s.S3SseKmsKeyId != nil && len(*s.S3SseKmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S3SseKmsKeyId", 1)) + } + if s.TableArn == nil { + invalidParams.Add(request.NewErrParamRequired("TableArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportTableToPointInTimeInput) SetClientToken(v string) *ExportTableToPointInTimeInput { + s.ClientToken = &v return s } -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue { - s.ComparisonOperator = &v +// SetExportFormat sets the ExportFormat field's value. +func (s *ExportTableToPointInTimeInput) SetExportFormat(v string) *ExportTableToPointInTimeInput { + s.ExportFormat = &v return s } -// SetExists sets the Exists field's value. -func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue { - s.Exists = &v +// SetExportTime sets the ExportTime field's value. +func (s *ExportTableToPointInTimeInput) SetExportTime(v time.Time) *ExportTableToPointInTimeInput { + s.ExportTime = &v return s } -// SetValue sets the Value field's value. -func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue { - s.Value = v +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTableToPointInTimeInput) SetS3Bucket(v string) *ExportTableToPointInTimeInput { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. 
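//
// Illustrative sketch (editorial addition, not part of the generated code): starting
// a point-in-time export to S3 with the input type defined above and checking the
// returned description. The table ARN, bucket name, and client value svc are
// assumptions.
//
//    out, err := svc.ExportTableToPointInTime(&dynamodb.ExportTableToPointInTimeInput{
//        TableArn:     aws.String("arn:aws:dynamodb:us-east-1:111122223333:table/Music"),
//        S3Bucket:     aws.String("my-export-bucket"),
//        ExportFormat: aws.String(dynamodb.ExportFormatDynamodbJson),
//    })
//    if err != nil {
//        // handle the error
//    }
//    desc, err := svc.DescribeExport(&dynamodb.DescribeExportInput{
//        ExportArn: out.ExportDescription.ExportArn,
//    })
//    if err == nil && aws.StringValue(desc.ExportDescription.ExportStatus) == dynamodb.ExportStatusCompleted {
//        // the export has finished
//    }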
+func (s *ExportTableToPointInTimeInput) SetS3BucketOwner(v string) *ExportTableToPointInTimeInput { + s.S3BucketOwner = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTableToPointInTimeInput) SetS3Prefix(v string) *ExportTableToPointInTimeInput { + s.S3Prefix = &v + return s +} + +// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. +func (s *ExportTableToPointInTimeInput) SetS3SseAlgorithm(v string) *ExportTableToPointInTimeInput { + s.S3SseAlgorithm = &v + return s +} + +// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. +func (s *ExportTableToPointInTimeInput) SetS3SseKmsKeyId(v string) *ExportTableToPointInTimeInput { + s.S3SseKmsKeyId = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ExportTableToPointInTimeInput) SetTableArn(v string) *ExportTableToPointInTimeInput { + s.TableArn = &v + return s +} + +type ExportTableToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Contains a description of the table export. + ExportDescription *ExportDescription `type:"structure"` +} + +// String returns the string representation +func (s ExportTableToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTableToPointInTimeOutput) GoString() string { + return s.String() +} + +// SetExportDescription sets the ExportDescription field's value. +func (s *ExportTableToPointInTimeOutput) SetExportDescription(v *ExportDescription) *ExportTableToPointInTimeOutput { + s.ExportDescription = v return s } @@ -11709,6 +12633,62 @@ func (s *InternalServerError) RequestID() string { return s.RespMetadata.RequestID } +// The specified ExportTime is outside of the point in time recovery window. +type InvalidExportTimeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidExportTimeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidExportTimeException) GoString() string { + return s.String() +} + +func newErrorInvalidExportTimeException(v protocol.ResponseMetadata) error { + return &InvalidExportTimeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidExportTimeException) Code() string { + return "InvalidExportTimeException" +} + +// Message returns the exception's message. +func (s *InvalidExportTimeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidExportTimeException) OrigErr() error { + return nil +} + +func (s *InvalidExportTimeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidExportTimeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidExportTimeException) RequestID() string { + return s.RespMetadata.RequestID +} + // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime // and LatestRestorableDateTime. 
type InvalidRestoreTimeException struct { @@ -12402,6 +13382,95 @@ func (s *ListContributorInsightsOutput) SetNextToken(v string) *ListContributorI return s } +type ListExportsInput struct { + _ struct{} `type:"structure"` + + // Maximum number of results to return per page. + MaxResults *int64 `min:"1" type:"integer"` + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListExports. When provided in this manner, the API fetches + // the next page of results. + NextToken *string `type:"string"` + + // The Amazon Resource Name (ARN) associated with the exported table. + TableArn *string `type:"string"` +} + +// String returns the string representation +func (s ListExportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListExportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListExportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListExportsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListExportsInput) SetMaxResults(v int64) *ListExportsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListExportsInput) SetNextToken(v string) *ListExportsInput { + s.NextToken = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ListExportsInput) SetTableArn(v string) *ListExportsInput { + s.TableArn = &v + return s +} + +type ListExportsOutput struct { + _ struct{} `type:"structure"` + + // A list of ExportSummary objects. + ExportSummaries []*ExportSummary `type:"list"` + + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListExports again, with NextToken set to this value. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListExportsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListExportsOutput) GoString() string { + return s.String() +} + +// SetExportSummaries sets the ExportSummaries field's value. +func (s *ListExportsOutput) SetExportSummaries(v []*ExportSummary) *ListExportsOutput { + s.ExportSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListExportsOutput) SetNextToken(v string) *ListExportsOutput { + s.NextToken = &v + return s +} + type ListGlobalTablesInput struct { _ struct{} `type:"structure"` @@ -20132,6 +21201,42 @@ func ContributorInsightsStatus_Values() []string { } } +const ( + // ExportFormatDynamodbJson is a ExportFormat enum value + ExportFormatDynamodbJson = "DYNAMODB_JSON" + + // ExportFormatIon is a ExportFormat enum value + ExportFormatIon = "ION" +) + +// ExportFormat_Values returns all elements of the ExportFormat enum +func ExportFormat_Values() []string { + return []string{ + ExportFormatDynamodbJson, + ExportFormatIon, + } +} + +const ( + // ExportStatusInProgress is a ExportStatus enum value + ExportStatusInProgress = "IN_PROGRESS" + + // ExportStatusCompleted is a ExportStatus enum value + ExportStatusCompleted = "COMPLETED" + + // ExportStatusFailed is a ExportStatus enum value + ExportStatusFailed = "FAILED" +) + +// ExportStatus_Values returns all elements of the ExportStatus enum +func ExportStatus_Values() []string { + return []string{ + ExportStatusInProgress, + ExportStatusCompleted, + ExportStatusFailed, + } +} + const ( // GlobalTableStatusCreating is a GlobalTableStatus enum value GlobalTableStatusCreating = "CREATING" @@ -20361,6 +21466,22 @@ func ReturnValuesOnConditionCheckFailure_Values() []string { } } +const ( + // S3SseAlgorithmAes256 is a S3SseAlgorithm enum value + S3SseAlgorithmAes256 = "AES256" + + // S3SseAlgorithmKms is a S3SseAlgorithm enum value + S3SseAlgorithmKms = "KMS" +) + +// S3SseAlgorithm_Values returns all elements of the S3SseAlgorithm enum +func S3SseAlgorithm_Values() []string { + return []string{ + S3SseAlgorithmAes256, + S3SseAlgorithmKms, + } +} + const ( // SSEStatusEnabling is a SSEStatus enum value SSEStatusEnabling = "ENABLING" diff --git a/service/dynamodb/dynamodbiface/interface.go b/service/dynamodb/dynamodbiface/interface.go index 1ba6dbfe245..d92f770ca1b 100644 --- a/service/dynamodb/dynamodbiface/interface.go +++ b/service/dynamodb/dynamodbiface/interface.go @@ -111,6 +111,10 @@ type DynamoDBAPI interface { DescribeEndpointsWithContext(aws.Context, *dynamodb.DescribeEndpointsInput, ...request.Option) (*dynamodb.DescribeEndpointsOutput, error) DescribeEndpointsRequest(*dynamodb.DescribeEndpointsInput) (*request.Request, *dynamodb.DescribeEndpointsOutput) + DescribeExport(*dynamodb.DescribeExportInput) (*dynamodb.DescribeExportOutput, error) + DescribeExportWithContext(aws.Context, *dynamodb.DescribeExportInput, ...request.Option) (*dynamodb.DescribeExportOutput, error) + DescribeExportRequest(*dynamodb.DescribeExportInput) (*request.Request, *dynamodb.DescribeExportOutput) + DescribeGlobalTable(*dynamodb.DescribeGlobalTableInput) (*dynamodb.DescribeGlobalTableOutput, error) DescribeGlobalTableWithContext(aws.Context, *dynamodb.DescribeGlobalTableInput, ...request.Option) (*dynamodb.DescribeGlobalTableOutput, error) DescribeGlobalTableRequest(*dynamodb.DescribeGlobalTableInput) (*request.Request, *dynamodb.DescribeGlobalTableOutput) @@ -135,6 +139,10 @@ type DynamoDBAPI interface { DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error) DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput) + ExportTableToPointInTime(*dynamodb.ExportTableToPointInTimeInput) (*dynamodb.ExportTableToPointInTimeOutput, error) + ExportTableToPointInTimeWithContext(aws.Context, *dynamodb.ExportTableToPointInTimeInput, 
...request.Option) (*dynamodb.ExportTableToPointInTimeOutput, error) + ExportTableToPointInTimeRequest(*dynamodb.ExportTableToPointInTimeInput) (*request.Request, *dynamodb.ExportTableToPointInTimeOutput) + GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error) GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) @@ -150,6 +158,13 @@ type DynamoDBAPI interface { ListContributorInsightsPages(*dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool) error ListContributorInsightsPagesWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool, ...request.Option) error + ListExports(*dynamodb.ListExportsInput) (*dynamodb.ListExportsOutput, error) + ListExportsWithContext(aws.Context, *dynamodb.ListExportsInput, ...request.Option) (*dynamodb.ListExportsOutput, error) + ListExportsRequest(*dynamodb.ListExportsInput) (*request.Request, *dynamodb.ListExportsOutput) + + ListExportsPages(*dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool) error + ListExportsPagesWithContext(aws.Context, *dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool, ...request.Option) error + ListGlobalTables(*dynamodb.ListGlobalTablesInput) (*dynamodb.ListGlobalTablesOutput, error) ListGlobalTablesWithContext(aws.Context, *dynamodb.ListGlobalTablesInput, ...request.Option) (*dynamodb.ListGlobalTablesOutput, error) ListGlobalTablesRequest(*dynamodb.ListGlobalTablesInput) (*request.Request, *dynamodb.ListGlobalTablesOutput) diff --git a/service/dynamodb/errors.go b/service/dynamodb/errors.go index b7e2d40b21d..8a9f3485dfd 100644 --- a/service/dynamodb/errors.go +++ b/service/dynamodb/errors.go @@ -33,6 +33,18 @@ const ( // Backups have not yet been enabled for this table. ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException" + // ErrCodeExportConflictException for service response error code + // "ExportConflictException". + // + // There was a conflict when writing to the specified S3 bucket. + ErrCodeExportConflictException = "ExportConflictException" + + // ErrCodeExportNotFoundException for service response error code + // "ExportNotFoundException". + // + // The specified export was not found. + ErrCodeExportNotFoundException = "ExportNotFoundException" + // ErrCodeGlobalTableAlreadyExistsException for service response error code // "GlobalTableAlreadyExistsException". // @@ -64,6 +76,12 @@ const ( // An error occurred on the server side. ErrCodeInternalServerError = "InternalServerError" + // ErrCodeInvalidExportTimeException for service response error code + // "InvalidExportTimeException". + // + // The specified ExportTime is outside of the point in time recovery window. + ErrCodeInvalidExportTimeException = "InvalidExportTimeException" + // ErrCodeInvalidRestoreTimeException for service response error code // "InvalidRestoreTimeException". 
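//
// Illustrative sketch (editorial addition, not part of the generated code): handling
// the export-related error codes introduced above, assuming err came from a failed
// ExportTableToPointInTime or DescribeExport call.
//
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case dynamodb.ErrCodeInvalidExportTimeException:
//            // the requested ExportTime is outside the point in time recovery window
//        case dynamodb.ErrCodeExportConflictException:
//            // there was a conflict when writing to the specified S3 bucket
//        case dynamodb.ErrCodeExportNotFoundException:
//            // the specified export was not found
//        default:
//            // other service or SDK error
//        }
//    }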
// @@ -274,11 +292,14 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "BackupNotFoundException": newErrorBackupNotFoundException, "ConditionalCheckFailedException": newErrorConditionalCheckFailedException, "ContinuousBackupsUnavailableException": newErrorContinuousBackupsUnavailableException, + "ExportConflictException": newErrorExportConflictException, + "ExportNotFoundException": newErrorExportNotFoundException, "GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException, "GlobalTableNotFoundException": newErrorGlobalTableNotFoundException, "IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException, "IndexNotFoundException": newErrorIndexNotFoundException, "InternalServerError": newErrorInternalServerError, + "InvalidExportTimeException": newErrorInvalidExportTimeException, "InvalidRestoreTimeException": newErrorInvalidRestoreTimeException, "ItemCollectionSizeLimitExceededException": newErrorItemCollectionSizeLimitExceededException, "LimitExceededException": newErrorLimitExceededException, diff --git a/service/ecs/api.go b/service/ecs/api.go index d9f695785b2..89c5bd9a42f 100644 --- a/service/ecs/api.go +++ b/service/ecs/api.go @@ -5803,8 +5803,9 @@ func (s *AttachmentStateChange) SetStatus(v string) *AttachmentStateChange { type Attribute struct { _ struct{} `type:"structure"` - // The name of the attribute. Up to 128 letters (uppercase and lowercase), numbers, - // hyphens, underscores, and periods are allowed. + // The name of the attribute. The name must contain between 1 and 128 characters + // and name may contain letters (uppercase and lowercase), numbers, hyphens, + // underscores, forward slashes, back slashes, or periods. // // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` @@ -5818,9 +5819,10 @@ type Attribute struct { // ARN. TargetType *string `locationName:"targetType" type:"string" enum:"TargetType"` - // The value of the attribute. Up to 128 letters (uppercase and lowercase), - // numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons, - // and spaces are allowed. + // The value of the attribute. The value must contain between 1 and 128 characters + // and may contain letters (uppercase and lowercase), numbers, hyphens, underscores, + // periods, at signs (@), forward slashes, back slashes, colons, or spaces. + // The value cannot contain any leading or trailing whitespace. Value *string `locationName:"value" type:"string"` } @@ -6014,15 +6016,15 @@ type AwsVpcConfiguration struct { // The default value is DISABLED. AssignPublicIp *string `locationName:"assignPublicIp" type:"string" enum:"AssignPublicIp"` - // The security groups associated with the task or service. If you do not specify - // a security group, the default security group for the VPC is used. There is - // a limit of 5 security groups that can be specified per AwsVpcConfiguration. + // The IDs of the security groups associated with the task or service. If you + // do not specify a security group, the default security group for the VPC is + // used. There is a limit of 5 security groups that can be specified per AwsVpcConfiguration. // // All specified security groups must be from the same VPC. SecurityGroups []*string `locationName:"securityGroups" type:"list"` - // The subnets associated with the task or service. There is a limit of 16 subnets - // that can be specified per AwsVpcConfiguration. + // The IDs of the subnets associated with the task or service. 
There is a limit + // of 16 subnets that can be specified per AwsVpcConfiguration. // // All specified subnets must be from the same VPC. // @@ -7054,7 +7056,7 @@ type ContainerDefinition struct { // The command that is passed to the container. This parameter maps to Cmd in // the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the COMMAND parameter to docker run (https://docs.docker.com/engine/reference/run/). + // and the COMMAND parameter to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // For more information, see https://docs.docker.com/engine/reference/builder/#cmd // (https://docs.docker.com/engine/reference/builder/#cmd). If there are multiple // arguments, each argument should be a separated string in the array. @@ -7063,7 +7065,7 @@ type ContainerDefinition struct { // The number of cpu units reserved for the container. This parameter maps to // CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // This field is optional for tasks using the Fargate launch type, and the only // requirement is that the total amount of CPU reserved for all containers within @@ -7104,7 +7106,8 @@ type ContainerDefinition struct { // // On Windows container instances, the CPU limit is enforced as an absolute // limit, or a quota. Windows containers only have access to the specified amount - // of CPU that is described in the task definition. + // of CPU that is described in the task definition. A null or zero CPU value + // is passed to Docker as 0, which Windows interprets as 1% of one CPU. Cpu *int64 `locationName:"cpu" type:"integer"` // The dependencies defined for container startup and shutdown. A container @@ -7132,29 +7135,32 @@ type ContainerDefinition struct { // This parameter maps to NetworkDisabled in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/). // - // This parameter is not supported for Windows containers. + // This parameter is not supported for Windows containers or tasks that use + // the awsvpc network mode. DisableNetworking *bool `locationName:"disableNetworking" type:"boolean"` // A list of DNS search domains that are presented to the container. This parameter // maps to DnsSearch in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --dns-search option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --dns-search option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // - // This parameter is not supported for Windows containers. + // This parameter is not supported for Windows containers or tasks that use + // the awsvpc network mode. DnsSearchDomains []*string `locationName:"dnsSearchDomains" type:"list"` // A list of DNS servers that are presented to the container. 
This parameter // maps to Dns in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --dns option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --dns option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // - // This parameter is not supported for Windows containers. + // This parameter is not supported for Windows containers or tasks that use + // the awsvpc network mode. DnsServers []*string `locationName:"dnsServers" type:"list"` // A key/value map of labels to add to the container. This parameter maps to // Labels in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --label option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --label option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // This parameter requires version 1.18 of the Docker Remote API or greater // on your container instance. To check the Docker Remote API version on your // container instance, log in to your container instance and run the following @@ -7172,7 +7178,7 @@ type ContainerDefinition struct { // // This parameter maps to SecurityOpt in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --security-opt option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --security-opt option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // The Amazon ECS container agent running on a container instance must register // with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment @@ -7180,6 +7186,12 @@ type ContainerDefinition struct { // options. For more information, see Amazon ECS Container Agent Configuration // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) // in the Amazon Elastic Container Service Developer Guide. + // + // For more information about valid values, see Docker Run Security Configuration + // (https://docs.docker.com/engine/reference/run/#security-configuration). + // + // Valid values: "no-new-privileges" | "apparmor:PROFILE" | "label:value" | + // "credentialspec:CredentialSpecFilePath" DockerSecurityOptions []*string `locationName:"dockerSecurityOptions" type:"list"` // @@ -7190,7 +7202,7 @@ type ContainerDefinition struct { // The entry point that is passed to the container. This parameter maps to Entrypoint // in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --entrypoint option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --entrypoint option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint // (https://docs.docker.com/engine/reference/builder/#entrypoint). EntryPoint []*string `locationName:"entryPoint" type:"list"` @@ -7198,14 +7210,14 @@ type ContainerDefinition struct { // The environment variables to pass to a container. 
This parameter maps to // Env in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --env option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --env option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // We do not recommend using plaintext environment variables for sensitive information, // such as credential data. Environment []*KeyValuePair `locationName:"environment" type:"list"` // A list of files containing the environment variables to pass to a container. - // This parameter maps to the --env-file option to docker run (https://docs.docker.com/engine/reference/run/). + // This parameter maps to the --env-file option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // You can specify up to ten environment files. The file must have a .env file // extension. Each line in an environment file should contain an environment @@ -7243,7 +7255,7 @@ type ContainerDefinition struct { // on the container. This parameter maps to ExtraHosts in the Create a container // (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section // of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) and - // the --add-host option to docker run (https://docs.docker.com/engine/reference/run/). + // the --add-host option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // This parameter is not supported for Windows containers or tasks that use // the awsvpc network mode. @@ -7259,13 +7271,13 @@ type ContainerDefinition struct { // for the container. This parameter maps to HealthCheck in the Create a container // (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section // of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) and - // the HEALTHCHECK parameter of docker run (https://docs.docker.com/engine/reference/run/). + // the HEALTHCHECK parameter of docker run (https://docs.docker.com/engine/reference/run/#security-configuration). HealthCheck *HealthCheck `locationName:"healthCheck" type:"structure"` // The hostname to use for your container. This parameter maps to Hostname in // the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --hostname option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --hostname option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // The hostname parameter is not supported if you are using the awsvpc network // mode. @@ -7279,7 +7291,7 @@ type ContainerDefinition struct { // signs are allowed. This parameter maps to Image in the Create a container // (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section // of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) and - // the IMAGE parameter of docker run (https://docs.docker.com/engine/reference/run/). + // the IMAGE parameter of docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // * When a new task starts, the Amazon ECS container agent pulls the latest // version of the specified image and tag for the container to use. 
However, @@ -7305,7 +7317,7 @@ type ContainerDefinition struct { // that require stdin or a tty to be allocated. This parameter maps to OpenStdin // in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --interactive option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --interactive option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). Interactive *bool `locationName:"interactive" type:"boolean"` // The links parameter allows containers to communicate with each other without @@ -7317,9 +7329,10 @@ type ContainerDefinition struct { // in the Docker documentation. This parameter maps to Links in the Create a // container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --link option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --link option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // - // This parameter is not supported for Windows containers. + // This parameter is not supported for Windows containers or tasks that use + // the awsvpc network mode. // // Containers that are collocated on a single container instance may be able // to communicate with each other without requiring links or host port mappings. @@ -7337,7 +7350,7 @@ type ContainerDefinition struct { // // This parameter maps to LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // By default, containers use the same logging driver that the Docker daemon // uses. However the container may use a different logging driver than the Docker // daemon by specifying a log driver with this parameter in the container definition. @@ -7370,7 +7383,7 @@ type ContainerDefinition struct { // lower than the task memory value, if one is specified. This parameter maps // to Memory in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --memory option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --memory option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // If using the Fargate launch type, this parameter is optional. // @@ -7393,7 +7406,7 @@ type ContainerDefinition struct { // whichever comes first. This parameter maps to MemoryReservation in the Create // a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --memory-reservation option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --memory-reservation option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // If a task-level memory value is not specified, you must specify a non-zero // integer for one or both of memory or memoryReservation in a container definition. 
@@ -7417,7 +7430,7 @@ type ContainerDefinition struct { // // This parameter maps to Volumes in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --volume option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --volume option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // Windows containers can mount whole directories on the same drive as $env:ProgramData. // Windows containers cannot mount directories on a different drive, and mount @@ -7430,7 +7443,7 @@ type ContainerDefinition struct { // and lowercase), numbers, and hyphens are allowed. This parameter maps to // name in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --name option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --name option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). Name *string `locationName:"name" type:"string"` // The list of port mappings for the container. Port mappings allow containers @@ -7446,7 +7459,7 @@ type ContainerDefinition struct { // // This parameter maps to PortBindings in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --publish option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --publish option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // If the network mode of a task definition is set to none, then you can't specify // port mappings. If the network mode of a task definition is set to host, then // host ports must either be undefined or they must match the container port @@ -7462,7 +7475,7 @@ type ContainerDefinition struct { // the host container instance (similar to the root user). This parameter maps // to Privileged in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --privileged option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --privileged option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // This parameter is not supported for Windows containers or tasks using the // Fargate launch type. @@ -7471,16 +7484,17 @@ type ContainerDefinition struct { // When this parameter is true, a TTY is allocated. This parameter maps to Tty // in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --tty option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --tty option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). PseudoTerminal *bool `locationName:"pseudoTerminal" type:"boolean"` // When this parameter is true, the container is given read-only access to its // root file system. 
This parameter maps to ReadonlyRootfs in the Create a container // (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section // of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) and - // the --read-only option to docker run (https://docs.docker.com/engine/reference/run/). + // the --read-only option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // - // This parameter is not supported for Windows containers. + // This parameter is not supported for Windows containers or tasks that use + // the awsvpc network mode. ReadonlyRootFilesystem *bool `locationName:"readonlyRootFilesystem" type:"boolean"` // The private repository authentication credentials to use. @@ -7503,19 +7517,16 @@ type ContainerDefinition struct { // give up and not start. This results in the task transitioning to a STOPPED // state. // + // When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable + // is used, it is enforced indendently from this start timeout value. + // // For tasks using the Fargate launch type, this parameter requires that the - // task or service uses platform version 1.3.0 or later. If this parameter is - // not specified, the default value of 3 minutes is used. + // task or service uses platform version 1.3.0 or later. // - // For tasks using the EC2 launch type, if the startTimeout parameter is not - // specified, the value set for the Amazon ECS container agent configuration - // variable ECS_CONTAINER_START_TIMEOUT is used by default. If neither the startTimeout - // parameter or the ECS_CONTAINER_START_TIMEOUT agent configuration variable - // are set, then the default values of 3 minutes for Linux containers and 8 - // minutes on Windows containers are used. Your container instances require - // at least version 1.26.0 of the container agent to enable a container start - // timeout value. However, we recommend using the latest container agent version. - // For information about checking your agent version and updating to the latest + // For tasks using the EC2 launch type, your container instances require at + // least version 1.26.0 of the container agent to enable a container start timeout + // value. However, we recommend using the latest container agent version. For + // information about checking your agent version and updating to the latest // version, see Updating the Amazon ECS Container Agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) // in the Amazon Elastic Container Service Developer Guide. If you are using // an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 @@ -7555,7 +7566,7 @@ type ContainerDefinition struct { // A list of namespaced kernel parameters to set in the container. This parameter // maps to Sysctls in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --sysctl option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --sysctl option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // It is not recommended that you specify network-related systemControls parameters // for multiple containers in a single task that also uses either the awsvpc @@ -7569,20 +7580,21 @@ type ContainerDefinition struct { // in a task definition, it will override the default values set by Docker. 
// This parameter maps to Ulimits in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // Valid naming values are displayed in the Ulimit data type. This parameter // requires version 1.18 of the Docker Remote API or greater on your container // instance. To check the Docker Remote API version on your container instance, // log in to your container instance and run the following command: sudo docker // version --format '{{.Server.APIVersion}}' // - // This parameter is not supported for Windows containers. + // This parameter is not supported for Windows containers or tasks that use + // the awsvpc network mode. Ulimits []*Ulimit `locationName:"ulimits" type:"list"` // The user name to use inside the container. This parameter maps to User in // the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --user option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --user option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // You can use the following formats. If specifying a UID or GID, you must specify // it as a positive integer. @@ -7599,19 +7611,20 @@ type ContainerDefinition struct { // // * uid:group // - // This parameter is not supported for Windows containers. + // This parameter is not supported for Windows containers or tasks that use + // the awsvpc network mode. User *string `locationName:"user" type:"string"` // Data volumes to mount from another container. This parameter maps to VolumesFrom // in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --volumes-from option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --volumes-from option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"` // The working directory in which to run commands inside the container. This // parameter maps to WorkingDir in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --workdir option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --workdir option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). WorkingDirectory *string `locationName:"workingDirectory" type:"string"` } @@ -7986,9 +7999,11 @@ type ContainerDependency struct { // * COMPLETE - This condition validates that a dependent container runs // to completion (exits) before permitting other containers to start. This // can be useful for nonessential containers that run a script and then exit. + // This condition cannot be set on an essential container. // // * SUCCESS - This condition is the same as COMPLETE, but it also requires - // that the container exits with a zero status. + // that the container exits with a zero status. 
This condition cannot be + // set on an essential container. // // * HEALTHY - This condition validates that the dependent container passes // its Docker health check before permitting other containers to start. This @@ -8878,8 +8893,8 @@ type CreateServiceInput struct { // in the Amazon Elastic Container Service Developer Guide. // // If the service is using the rolling update (ECS) deployment controller and - // using either an Application Load Balancer or Network Load Balancer, you can - // specify multiple target groups to attach to the service. The service-linked + // using either an Application Load Balancer or Network Load Balancer, you must + // specify one or more target group ARNs to attach to the service. The service-linked // role is required for services that make use of multiple target groups. For // more information, see Using Service-Linked Roles for Amazon ECS (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) // in the Amazon Elastic Container Service Developer Guide. @@ -8903,15 +8918,16 @@ type CreateServiceInput struct { // For Application Load Balancers and Network Load Balancers, this object must // contain the load balancer target group ARN, the container name (as it appears // in a container definition), and the container port to access from the load - // balancer. When a task from this service is placed on a container instance, - // the container instance and port combination is registered as a target in - // the target group specified here. + // balancer. The load balancer name parameter must be omitted. When a task from + // this service is placed on a container instance, the container instance and + // port combination is registered as a target in the target group specified + // here. // // For Classic Load Balancers, this object must contain the load balancer name, // the container name (as it appears in a container definition), and the container - // port to access from the load balancer. When a task from this service is placed - // on a container instance, the container instance is registered with the load - // balancer specified here. + // port to access from the load balancer. The target group ARN parameter must + // be omitted. When a task from this service is placed on a container instance, + // the container instance is registered with the load balancer specified here. // // Services with tasks that use the awsvpc network mode (for example, those // with the Fargate launch type) only support Application Load Balancers and @@ -9046,8 +9062,8 @@ type CreateServiceInput struct { // to run in your service. If a revision is not specified, the latest ACTIVE // revision is used. // - // A task definition must be specified if the service is using the ECS deployment - // controller. + // A task definition must be specified if the service is using either the ECS + // or CODE_DEPLOY deployment controllers. TaskDefinition *string `locationName:"taskDefinition" type:"string"` } @@ -11355,10 +11371,11 @@ type EFSAuthorizationConfig struct { _ struct{} `type:"structure"` // The Amazon EFS access point ID to use. If an access point is specified, the - // root directory value specified in the EFSVolumeConfiguration will be relative - // to the directory set for the access point. If an access point is used, transit - // encryption must be enabled in the EFSVolumeConfiguration. 
For more information, - // see Working with Amazon EFS Access Points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) + // root directory value specified in the EFSVolumeConfiguration must either + // be omitted or set to / which will enforce the path set on the EFS access + // point. If an access point is used, transit encryption must be enabled in + // the EFSVolumeConfiguration. For more information, see Working with Amazon + // EFS Access Points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) // in the Amazon Elastic File System User Guide. AccessPointId *string `locationName:"accessPointId" type:"string"` @@ -11412,6 +11429,10 @@ type EFSVolumeConfiguration struct { // inside the host. If this parameter is omitted, the root of the Amazon EFS // volume will be used. Specifying / will have the same effect as omitting this // parameter. + // + // If an EFS access point is specified in the authorizationConfig, the root + // directory parameter must either be omitted or set to / which will enforce + // the path set on the EFS access point. RootDirectory *string `locationName:"rootDirectory" type:"string"` // Whether or not to enable encryption for Amazon EFS data in transit between @@ -11553,7 +11574,162 @@ func (s *EnvironmentFile) SetValue(v string) *EnvironmentFile { return s } -// A failed resource. +// The authorization configuration details for Amazon FSx for Windows File Server +// file system. See FSxWindowsFileServerVolumeConfiguration (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_FSxWindowsFileServerVolumeConfiguration.html) +// in the Amazon Elastic Container Service API Reference. +// +// For more information and the input format, see Amazon FSx for Windows File +// Server Volumes (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html) +// in the Amazon Elastic Container Service Developer Guide. +type FSxWindowsFileServerAuthorizationConfig struct { + _ struct{} `type:"structure"` + + // The authorization credential option to use. + // + // The authorization credential options can be provided using either the AWS + // Secrets Manager ARN or the AWS Systems Manager ARN. The ARNs refer to the + // stored credentials. + // + // options: + // + // * ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an AWS Secrets Manager (https://docs.aws.amazon.com/secretsmanager) + // secret. + // + // * ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // of an AWS Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/integration-ps-secretsmanager.html) + // parameter. + // + // CredentialsParameter is a required field + CredentialsParameter *string `locationName:"credentialsParameter" type:"string" required:"true"` + + // A fully qualified domain name hosted by an AWS Directory Service (https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) + // Managed Microsoft AD (Active Directory) or self-hosted EC2 AD. + // + // Domain is a required field + Domain *string `locationName:"domain" type:"string" required:"true"` +} + +// String returns the string representation +func (s FSxWindowsFileServerAuthorizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FSxWindowsFileServerAuthorizationConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *FSxWindowsFileServerAuthorizationConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FSxWindowsFileServerAuthorizationConfig"} + if s.CredentialsParameter == nil { + invalidParams.Add(request.NewErrParamRequired("CredentialsParameter")) + } + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCredentialsParameter sets the CredentialsParameter field's value. +func (s *FSxWindowsFileServerAuthorizationConfig) SetCredentialsParameter(v string) *FSxWindowsFileServerAuthorizationConfig { + s.CredentialsParameter = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *FSxWindowsFileServerAuthorizationConfig) SetDomain(v string) *FSxWindowsFileServerAuthorizationConfig { + s.Domain = &v + return s +} + +// This parameter is specified when you are using Amazon FSx for Windows File +// Server (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/what-is.html) +// file system for task storage. +// +// For more information and the input format, see Amazon FSx for Windows File +// Server Volumes (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html) +// in the Amazon Elastic Container Service Developer Guide. +type FSxWindowsFileServerVolumeConfiguration struct { + _ struct{} `type:"structure"` + + // The authorization configuration details for the Amazon FSx for Windows File + // Server file system. + // + // AuthorizationConfig is a required field + AuthorizationConfig *FSxWindowsFileServerAuthorizationConfig `locationName:"authorizationConfig" type:"structure" required:"true"` + + // The Amazon FSx for Windows File Server file system ID to use. + // + // FileSystemId is a required field + FileSystemId *string `locationName:"fileSystemId" type:"string" required:"true"` + + // The directory within the Amazon FSx for Windows File Server file system to + // mount as the root directory inside the host. + // + // RootDirectory is a required field + RootDirectory *string `locationName:"rootDirectory" type:"string" required:"true"` +} + +// String returns the string representation +func (s FSxWindowsFileServerVolumeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FSxWindowsFileServerVolumeConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FSxWindowsFileServerVolumeConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FSxWindowsFileServerVolumeConfiguration"} + if s.AuthorizationConfig == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizationConfig")) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.RootDirectory == nil { + invalidParams.Add(request.NewErrParamRequired("RootDirectory")) + } + if s.AuthorizationConfig != nil { + if err := s.AuthorizationConfig.Validate(); err != nil { + invalidParams.AddNested("AuthorizationConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorizationConfig sets the AuthorizationConfig field's value. 
+func (s *FSxWindowsFileServerVolumeConfiguration) SetAuthorizationConfig(v *FSxWindowsFileServerAuthorizationConfig) *FSxWindowsFileServerVolumeConfiguration { + s.AuthorizationConfig = v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *FSxWindowsFileServerVolumeConfiguration) SetFileSystemId(v string) *FSxWindowsFileServerVolumeConfiguration { + s.FileSystemId = &v + return s +} + +// SetRootDirectory sets the RootDirectory field's value. +func (s *FSxWindowsFileServerVolumeConfiguration) SetRootDirectory(v string) *FSxWindowsFileServerVolumeConfiguration { + s.RootDirectory = &v + return s +} + +// A failed resource. For a list of common causes, see API failure reasons (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html) +// in the Amazon Elastic Container Service Developer Guide. type Failure struct { _ struct{} `type:"structure"` @@ -12047,11 +12223,9 @@ type KernelCapabilities struct { // configuration provided by Docker. This parameter maps to CapAdd in the Create // a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --cap-add option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --cap-add option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // - // The SYS_PTRACE capability is supported for tasks that use the Fargate launch - // type if they are also using platform version 1.4.0. The other capabilities - // are not supported for any platform versions. + // Tasks launched on AWS Fargate only support adding the SYS_PTRACE kernel capability. // // Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | // "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" @@ -12067,7 +12241,7 @@ type KernelCapabilities struct { // default configuration provided by Docker. This parameter maps to CapDrop // in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --cap-drop option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --cap-drop option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | // "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" @@ -12208,14 +12382,14 @@ type LinuxParameters struct { // Any host devices to expose to the container. This parameter maps to Devices // in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the --device option to docker run (https://docs.docker.com/engine/reference/run/). + // and the --device option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // If you are using tasks that use the Fargate launch type, the devices parameter // is not supported. Devices []*Device `locationName:"devices" type:"list"` // Run an init process inside the container that forwards signals and reaps - // processes. This parameter maps to the --init option to docker run (https://docs.docker.com/engine/reference/run/). + // processes. 
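The kernel capability and init-process parameters documented in this hunk can be combined as in the short sketch below; the values are illustrative assumptions only, and on AWS Fargate the only capability that may be added is SYS_PTRACE.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Linux-specific settings for a container definition: add the SYS_PTRACE
	// capability and enable a signal-forwarding init process inside the container.
	linux := &ecs.LinuxParameters{
		Capabilities: &ecs.KernelCapabilities{
			Add: []*string{aws.String("SYS_PTRACE")},
		},
		InitProcessEnabled: aws.Bool(true),
	}
	fmt.Println(linux.String())
}
```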
This parameter maps to the --init option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // This parameter requires version 1.25 of the Docker Remote API or greater // on your container instance. To check the Docker Remote API version on your // container instance, log in to your container instance and run the following @@ -12223,7 +12397,7 @@ type LinuxParameters struct { InitProcessEnabled *bool `locationName:"initProcessEnabled" type:"boolean"` // The total amount of swap memory (in MiB) a container can use. This parameter - // will be translated to the --memory-swap option to docker run (https://docs.docker.com/engine/reference/run/) + // will be translated to the --memory-swap option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration) // where the value would be the sum of the container memory plus the maxSwap // value. // @@ -12238,7 +12412,7 @@ type LinuxParameters struct { MaxSwap *int64 `locationName:"maxSwap" type:"integer"` // The value for the size (in MiB) of the /dev/shm volume. This parameter maps - // to the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/). + // to the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // If you are using tasks that use the Fargate launch type, the sharedMemorySize // parameter is not supported. @@ -12250,14 +12424,14 @@ type LinuxParameters struct { // Accepted values are whole numbers between 0 and 100. If the swappiness parameter // is not specified, a default value of 60 is used. If a value is not specified // for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness - // option to docker run (https://docs.docker.com/engine/reference/run/). + // option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // If you are using tasks that use the Fargate launch type, the swappiness parameter // is not supported. Swappiness *int64 `locationName:"swappiness" type:"integer"` // The container path, mount options, and size (in MiB) of the tmpfs mount. - // This parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/). + // This parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // If you are using tasks that use the Fargate launch type, the tmpfs parameter // is not supported. @@ -13436,18 +13610,16 @@ func (s *LoadBalancer) SetTargetGroupArn(v string) *LoadBalancer { return s } -// The log configuration specification for the container. -// -// This parameter maps to LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) +// The log configuration for the container. This parameter maps to LogConfig +// in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/commandline/run/). +// // By default, containers use the same logging driver that the Docker daemon // uses; however the container may use a different logging driver than the Docker -// daemon by specifying a log driver with this parameter in the container definition. 
-// To use a different logging driver for a container, the log system must be -// configured properly on the container instance (or on a different log server -// for remote logging options). For more information on the options for different -// supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) +// daemon by specifying a log driver configuration in the container definition. +// For more information on the options for different supported log drivers, +// see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) // in the Docker documentation. // // The following should be noted when specifying a log configuration for your @@ -13461,38 +13633,35 @@ func (s *LoadBalancer) SetTargetGroupArn(v string) *LoadBalancer { // * This parameter requires version 1.18 of the Docker Remote API or greater // on your container instance. // -// * For tasks using the EC2 launch type, the Amazon ECS container agent -// running on a container instance must register the logging drivers available -// on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable -// before containers placed on that instance can use these log configuration -// options. For more information, see Amazon ECS Container Agent Configuration -// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) +// * For tasks hosted on Amazon EC2 instances, the Amazon ECS container agent +// must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS +// environment variable before containers placed on that instance can use +// these log configuration options. For more information, see Amazon ECS +// container agent configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) // in the Amazon Elastic Container Service Developer Guide. // -// * For tasks using the Fargate launch type, because you do not have access -// to the underlying infrastructure your tasks are hosted on, any additional -// software needed will have to be installed outside of the task. For example, -// the Fluentd output aggregators or a remote host running Logstash to send -// Gelf logs to. +// * For tasks on AWS Fargate, because you do not have access to the underlying +// infrastructure your tasks are hosted on, any additional software needed +// will have to be installed outside of the task. For example, the Fluentd +// output aggregators or a remote host running Logstash to send Gelf logs +// to. type LogConfiguration struct { _ struct{} `type:"structure"` - // The log driver to use for the container. The valid values listed earlier - // are log drivers that the Amazon ECS container agent can communicate with - // by default. + // The log driver to use for the container. // - // For tasks using the Fargate launch type, the supported log drivers are awslogs, - // splunk, and awsfirelens. + // For tasks on AWS Fargate, the supported log drivers are awslogs, splunk, + // and awsfirelens. // - // For tasks using the EC2 launch type, the supported log drivers are awslogs, + // For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, // fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens. 
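For the log drivers listed above, a minimal awslogs configuration might look like the sketch below; the option keys follow the awslogs driver documentation, while the log group, region, and stream prefix values are placeholder assumptions.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// A container definition that sends its logs to CloudWatch Logs through the
	// awslogs driver; group, region, and stream prefix are placeholders.
	container := &ecs.ContainerDefinition{
		Name:  aws.String("web"),
		Image: aws.String("nginx:latest"),
		LogConfiguration: &ecs.LogConfiguration{
			LogDriver: aws.String(ecs.LogDriverAwslogs),
			Options: map[string]*string{
				"awslogs-group":         aws.String("/ecs/example-app"),
				"awslogs-region":        aws.String("us-east-1"),
				"awslogs-stream-prefix": aws.String("web"),
			},
		},
	}
	fmt.Println(container.String())
}
```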
// // For more information about using the awslogs log driver, see Using the awslogs - // Log Driver (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) + // log driver (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) // in the Amazon Elastic Container Service Developer Guide. // - // For more information about using the awsfirelens log driver, see Custom Log - // Routing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) + // For more information about using the awsfirelens log driver, see Custom log + // routing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) // in the Amazon Elastic Container Service Developer Guide. // // If you have a custom driver that is not listed, you can fork the Amazon ECS @@ -13582,14 +13751,23 @@ func (s *LogConfiguration) SetSecretOptions(v []*Secret) *LogConfiguration { type ManagedScaling struct { _ struct{} `type:"structure"` - // The maximum number of container instances that Amazon ECS will scale in or - // scale out at one time. If this parameter is omitted, the default value of - // 10000 is used. + // The maximum number of Amazon EC2 instances that Amazon ECS will scale out + // at one time. The scale in process is not affected by this parameter. If this + // parameter is omitted, the default value of 10000 is used. MaximumScalingStepSize *int64 `locationName:"maximumScalingStepSize" min:"1" type:"integer"` - // The minimum number of container instances that Amazon ECS will scale in or - // scale out at one time. If this parameter is omitted, the default value of - // 1 is used. + // The minimum number of Amazon EC2 instances that Amazon ECS will scale out + // at one time. The scale in process is not affected by this parameter If this + // parameter is omitted, the default value of 1 is used. + // + // When additional capacity is required, Amazon ECS will scale up the minimum + // scaling step size even if the actual demand is less than the minimum scaling + // step size. + // + // If you use a capacity provider with an Auto Scaling group configured with + // more than one Amazon EC2 instance type or Availability Zone, Amazon ECS will + // scale up by the exact minimum scaling step size value and will ignore both + // the maximum scaling step size as well as the capacity demand. MinimumScalingStepSize *int64 `locationName:"minimumScalingStepSize" min:"1" type:"integer"` // Whether or not to enable managed scaling for the capacity provider. @@ -14239,9 +14417,6 @@ type PortMapping struct { // receives a host port in the ephemeral port range. For more information, see // hostPort. Port mappings that are automatically assigned in this way do not // count toward the 100 reserved ports limit of a container instance. - // - // You cannot expose the same container port for multiple protocols. An error - // will be returned if this is attempted. ContainerPort *int64 `locationName:"containerPort" type:"integer"` // The port number on the container instance to reserve for your container. @@ -15180,8 +15355,10 @@ type RegisterTaskDefinitionInput struct { // version 1.3.0 or later. ProxyConfiguration *ProxyConfiguration `locationName:"proxyConfiguration" type:"structure"` - // The launch type required by the task. If no value is specified, it defaults - // to EC2. + // The task launch type that Amazon ECS should validate the task definition + // against. 
This ensures that the task definition parameters are compatible + // with the specified launch type. If no value is specified, it defaults to + // EC2. RequiresCompatibilities []*string `locationName:"requiresCompatibilities" type:"list"` // The metadata that you apply to the task definition to help you categorize @@ -17473,7 +17650,7 @@ func (s *SubmitTaskStateChangeOutput) SetAcknowledgment(v string) *SubmitTaskSta // A list of namespaced kernel parameters to set in the container. This parameter // maps to Sysctls in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) -// and the --sysctl option to docker run (https://docs.docker.com/engine/reference/run/). +// and the --sysctl option to docker run (https://docs.docker.com/engine/reference/run/#security-configuration). // // It is not recommended that you specify network-related systemControls parameters // for multiple containers in a single task that also uses either the awsvpc @@ -18264,9 +18441,11 @@ type TaskDefinition struct { // The amount (in MiB) of memory used by the task. // - // If using the EC2 launch type, this field is optional and any value can be - // used. If a task-level memory value is specified then the container-level - // memory value is optional. + // If using the EC2 launch type, you must specify either a task-level memory + // value or a container-level memory value. This field is optional and any value + // can be used. If a task-level memory value is specified then the container-level + // memory value is optional. For more information regarding container-level + // memory and memory reservation, see ContainerDefinition (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html). // // If using the Fargate launch type, this field is required and you must use // one of the following values, which determines your range of valid values @@ -19039,7 +19218,7 @@ type Tmpfs struct { // | "mode" | "uid" | "gid" | "nr_inodes" | "nr_blocks" | "mpol" MountOptions []*string `locationName:"mountOptions" type:"list"` - // The size (in MiB) of the tmpfs volume. + // The maximum size (in MiB) of the tmpfs volume. // // Size is a required field Size *int64 `locationName:"size" type:"integer" required:"true"` @@ -20078,11 +20257,13 @@ func (s *VersionInfo) SetDockerVersion(v string) *VersionInfo { return s } -// A data volume used in a task definition. For tasks that use Amazon Elastic -// File System (Amazon EFS) file storage, specify an efsVolumeConfiguration. -// For tasks that use a Docker volume, specify a DockerVolumeConfiguration. -// For tasks that use a bind mount host volume, specify a host and optional -// sourcePath. For more information, see Using Data Volumes in Tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html). +// A data volume used in a task definition. For tasks that use the Amazon Elastic +// File System (Amazon EFS), specify an efsVolumeConfiguration. For Windows +// tasks that use Amazon FSx for Windows File Server file system, specify a +// fsxWindowsFileServerVolumeConfiguration. For tasks that use a Docker volume, +// specify a DockerVolumeConfiguration. For tasks that use a bind mount host +// volume, specify a host and optional sourcePath. For more information, see +// Using Data Volumes in Tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html). 
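A hedged sketch of wiring the new fsxWindowsFileServerVolumeConfiguration into a task definition volume follows; the file system ID, Secrets Manager ARN, Active Directory domain, and share path are placeholder assumptions rather than values taken from this change.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// A task definition volume backed by an Amazon FSx for Windows File Server
	// file system; every identifier below is a placeholder.
	volume := &ecs.Volume{
		Name: aws.String("fsx-share"),
		FsxWindowsFileServerVolumeConfiguration: &ecs.FSxWindowsFileServerVolumeConfiguration{
			FileSystemId:  aws.String("fs-0123456789abcdef0"),
			RootDirectory: aws.String("share"),
			AuthorizationConfig: &ecs.FSxWindowsFileServerAuthorizationConfig{
				// ARN of a Secrets Manager secret (or SSM parameter) holding the
				// domain credentials, per the CredentialsParameter documentation above.
				CredentialsParameter: aws.String("arn:aws:secretsmanager:us-east-1:111122223333:secret:fsx-creds"),
				Domain:               aws.String("corp.example.com"),
			},
		},
	}
	if err := volume.Validate(); err != nil {
		fmt.Println("invalid volume:", err)
		return
	}
	fmt.Println(volume.String())
}
```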
type Volume struct { _ struct{} `type:"structure"` @@ -20096,6 +20277,10 @@ type Volume struct { // file system for task storage. EfsVolumeConfiguration *EFSVolumeConfiguration `locationName:"efsVolumeConfiguration" type:"structure"` + // This parameter is specified when you are using Amazon FSx for Windows File + // Server file system for task storage. + FsxWindowsFileServerVolumeConfiguration *FSxWindowsFileServerVolumeConfiguration `locationName:"fsxWindowsFileServerVolumeConfiguration" type:"structure"` + // This parameter is specified when you are using bind mount host volumes. The // contents of the host parameter determine whether your bind mount host volume // persists on the host container instance and where it is stored. If the host @@ -20133,6 +20318,11 @@ func (s *Volume) Validate() error { invalidParams.AddNested("EfsVolumeConfiguration", err.(request.ErrInvalidParams)) } } + if s.FsxWindowsFileServerVolumeConfiguration != nil { + if err := s.FsxWindowsFileServerVolumeConfiguration.Validate(); err != nil { + invalidParams.AddNested("FsxWindowsFileServerVolumeConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20152,6 +20342,12 @@ func (s *Volume) SetEfsVolumeConfiguration(v *EFSVolumeConfiguration) *Volume { return s } +// SetFsxWindowsFileServerVolumeConfiguration sets the FsxWindowsFileServerVolumeConfiguration field's value. +func (s *Volume) SetFsxWindowsFileServerVolumeConfiguration(v *FSxWindowsFileServerVolumeConfiguration) *Volume { + s.FsxWindowsFileServerVolumeConfiguration = v + return s +} + // SetHost sets the Host field's value. func (s *Volume) SetHost(v *HostVolumeProperties) *Volume { s.Host = v diff --git a/service/elasticsearchservice/api.go b/service/elasticsearchservice/api.go index c0c0f117fd4..292fa71a8b8 100644 --- a/service/elasticsearchservice/api.go +++ b/service/elasticsearchservice/api.go @@ -2400,6 +2400,158 @@ func (c *ElasticsearchService) GetCompatibleElasticsearchVersionsWithContext(ctx return out, req.Send() } +const opGetPackageVersionHistory = "GetPackageVersionHistory" + +// GetPackageVersionHistoryRequest generates a "aws/request.Request" representing the +// client's request for the GetPackageVersionHistory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPackageVersionHistory for more information on using the GetPackageVersionHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPackageVersionHistoryRequest method. 
+// req, resp := client.GetPackageVersionHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) GetPackageVersionHistoryRequest(input *GetPackageVersionHistoryInput) (req *request.Request, output *GetPackageVersionHistoryOutput) { + op := &request.Operation{ + Name: opGetPackageVersionHistory, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/packages/{PackageID}/history", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetPackageVersionHistoryInput{} + } + + output = &GetPackageVersionHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPackageVersionHistory API operation for Amazon Elasticsearch Service. +// +// Returns a list of versions of the package, along with their creation time +// and commit message. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation GetPackageVersionHistory for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) GetPackageVersionHistory(input *GetPackageVersionHistoryInput) (*GetPackageVersionHistoryOutput, error) { + req, out := c.GetPackageVersionHistoryRequest(input) + return out, req.Send() +} + +// GetPackageVersionHistoryWithContext is the same as GetPackageVersionHistory with the addition of +// the ability to pass a context and additional request options. +// +// See GetPackageVersionHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) GetPackageVersionHistoryWithContext(ctx aws.Context, input *GetPackageVersionHistoryInput, opts ...request.Option) (*GetPackageVersionHistoryOutput, error) { + req, out := c.GetPackageVersionHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetPackageVersionHistoryPages iterates over the pages of a GetPackageVersionHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetPackageVersionHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a GetPackageVersionHistory operation. +// pageNum := 0 +// err := client.GetPackageVersionHistoryPages(params, +// func(page *elasticsearchservice.GetPackageVersionHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) GetPackageVersionHistoryPages(input *GetPackageVersionHistoryInput, fn func(*GetPackageVersionHistoryOutput, bool) bool) error { + return c.GetPackageVersionHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetPackageVersionHistoryPagesWithContext same as GetPackageVersionHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) GetPackageVersionHistoryPagesWithContext(ctx aws.Context, input *GetPackageVersionHistoryInput, fn func(*GetPackageVersionHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetPackageVersionHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetPackageVersionHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetPackageVersionHistoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetUpgradeHistory = "GetUpgradeHistory" // GetUpgradeHistoryRequest generates a "aws/request.Request" representing the @@ -3865,6 +4017,103 @@ func (c *ElasticsearchService) UpdateElasticsearchDomainConfigWithContext(ctx aw return out, req.Send() } +const opUpdatePackage = "UpdatePackage" + +// UpdatePackageRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePackage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdatePackage for more information on using the UpdatePackage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdatePackageRequest method. +// req, resp := client.UpdatePackageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) UpdatePackageRequest(input *UpdatePackageInput) (req *request.Request, output *UpdatePackageOutput) { + op := &request.Operation{ + Name: opUpdatePackage, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/packages/update", + } + + if input == nil { + input = &UpdatePackageInput{} + } + + output = &UpdatePackageOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdatePackage API operation for Amazon Elasticsearch Service. +// +// Updates a package for use with Amazon ES domains. +// +// Returns awserr.Error for service API and SDK errors. 
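Putting the new package-versioning operations together, the sketch below pushes a new package version with UpdatePackage and then walks the history with GetPackageVersionHistoryPages; the package ID, bucket, key, commit message, page size, and session setup are placeholder assumptions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticsearchservice.New(sess)

	// Push a new version of an existing package from S3; all identifiers are placeholders.
	updated, err := svc.UpdatePackage(&elasticsearchservice.UpdatePackageInput{
		PackageID:     aws.String("pkg-0123456789abcdef"),
		CommitMessage: aws.String("add new synonyms"),
		PackageSource: &elasticsearchservice.PackageSource{
			S3BucketName: aws.String("example-packages-bucket"),
			S3Key:        aws.String("synonyms-v2.txt"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest version:", aws.StringValue(updated.PackageDetails.AvailablePackageVersion))

	// Walk the version history; the Pages helper follows NextToken automatically.
	err = svc.GetPackageVersionHistoryPages(
		&elasticsearchservice.GetPackageVersionHistoryInput{
			PackageID:  aws.String("pkg-0123456789abcdef"),
			MaxResults: aws.Int64(10),
		},
		func(page *elasticsearchservice.GetPackageVersionHistoryOutput, lastPage bool) bool {
			for _, v := range page.PackageVersionHistoryList {
				fmt.Printf("%s\t%s\n", aws.StringValue(v.PackageVersion), aws.StringValue(v.CommitMessage))
			}
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}
```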
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation UpdatePackage for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * LimitExceededException +// An exception for trying to create more than allowed resources or sub-resources. +// Gives http status code of 409. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) UpdatePackage(input *UpdatePackageInput) (*UpdatePackageOutput, error) { + req, out := c.UpdatePackageRequest(input) + return out, req.Send() +} + +// UpdatePackageWithContext is the same as UpdatePackage with the addition of +// the ability to pass a context and additional request options. +// +// See UpdatePackage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) UpdatePackageWithContext(ctx aws.Context, input *UpdatePackageInput, opts ...request.Option) (*UpdatePackageOutput, error) { + req, out := c.UpdatePackageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpgradeElasticsearchDomain = "UpgradeElasticsearchDomain" // UpgradeElasticsearchDomainRequest generates a "aws/request.Request" representing the @@ -6798,6 +7047,8 @@ type DomainPackageDetails struct { // Currently supports only TXT-DICTIONARY. PackageType *string `type:"string" enum:"PackageType"` + PackageVersion *string `type:"string"` + // The relative path on Amazon ES nodes, which can be used as synonym_path when // the package is synonym file. ReferencePath *string `type:"string"` @@ -6855,6 +7106,12 @@ func (s *DomainPackageDetails) SetPackageType(v string) *DomainPackageDetails { return s } +// SetPackageVersion sets the PackageVersion field's value. +func (s *DomainPackageDetails) SetPackageVersion(v string) *DomainPackageDetails { + s.PackageVersion = &v + return s +} + // SetReferencePath sets the ReferencePath field's value. func (s *DomainPackageDetails) SetReferencePath(v string) *DomainPackageDetails { s.ReferencePath = &v @@ -7755,6 +8012,107 @@ func (s *GetCompatibleElasticsearchVersionsOutput) SetCompatibleElasticsearchVer return s } +// Container for request parameters to GetPackageVersionHistory operation. +type GetPackageVersionHistoryInput struct { + _ struct{} `type:"structure"` + + // Limits results to a maximum number of versions. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // Used for pagination. 
Only necessary if a previous API call includes a non-null + // NextToken value. If provided, returns results for the next page. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // Returns an audit history of versions of the package. + // + // PackageID is a required field + PackageID *string `location:"uri" locationName:"PackageID" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPackageVersionHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPackageVersionHistoryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPackageVersionHistoryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPackageVersionHistoryInput"} + if s.PackageID == nil { + invalidParams.Add(request.NewErrParamRequired("PackageID")) + } + if s.PackageID != nil && len(*s.PackageID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PackageID", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetPackageVersionHistoryInput) SetMaxResults(v int64) *GetPackageVersionHistoryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetPackageVersionHistoryInput) SetNextToken(v string) *GetPackageVersionHistoryInput { + s.NextToken = &v + return s +} + +// SetPackageID sets the PackageID field's value. +func (s *GetPackageVersionHistoryInput) SetPackageID(v string) *GetPackageVersionHistoryInput { + s.PackageID = &v + return s +} + +// Container for response returned by GetPackageVersionHistory operation. +type GetPackageVersionHistoryOutput struct { + _ struct{} `type:"structure"` + + NextToken *string `type:"string"` + + PackageID *string `type:"string"` + + // List of PackageVersionHistory objects. + PackageVersionHistoryList []*PackageVersionHistory `type:"list"` +} + +// String returns the string representation +func (s GetPackageVersionHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPackageVersionHistoryOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetPackageVersionHistoryOutput) SetNextToken(v string) *GetPackageVersionHistoryOutput { + s.NextToken = &v + return s +} + +// SetPackageID sets the PackageID field's value. +func (s *GetPackageVersionHistoryOutput) SetPackageID(v string) *GetPackageVersionHistoryOutput { + s.PackageID = &v + return s +} + +// SetPackageVersionHistoryList sets the PackageVersionHistoryList field's value. +func (s *GetPackageVersionHistoryOutput) SetPackageVersionHistoryList(v []*PackageVersionHistory) *GetPackageVersionHistoryOutput { + s.PackageVersionHistoryList = v + return s +} + // Container for request parameters to GetUpgradeHistory operation. type GetUpgradeHistoryInput struct { _ struct{} `type:"structure"` @@ -9254,12 +9612,16 @@ func (s *OutboundCrossClusterSearchConnectionStatus) SetStatusCode(v string) *Ou type PackageDetails struct { _ struct{} `type:"structure"` + AvailablePackageVersion *string `type:"string"` + // Timestamp which tells creation date of the package. CreatedAt *time.Time `type:"timestamp"` // Additional information if the package is in an error state. Null otherwise. 
ErrorDetails *ErrorDetails `type:"structure"` + LastUpdatedAt *time.Time `type:"timestamp"` + // User-specified description of the package. PackageDescription *string `type:"string"` @@ -9286,6 +9648,12 @@ func (s PackageDetails) GoString() string { return s.String() } +// SetAvailablePackageVersion sets the AvailablePackageVersion field's value. +func (s *PackageDetails) SetAvailablePackageVersion(v string) *PackageDetails { + s.AvailablePackageVersion = &v + return s +} + // SetCreatedAt sets the CreatedAt field's value. func (s *PackageDetails) SetCreatedAt(v time.Time) *PackageDetails { s.CreatedAt = &v @@ -9298,6 +9666,12 @@ func (s *PackageDetails) SetErrorDetails(v *ErrorDetails) *PackageDetails { return s } +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *PackageDetails) SetLastUpdatedAt(v time.Time) *PackageDetails { + s.LastUpdatedAt = &v + return s +} + // SetPackageDescription sets the PackageDescription field's value. func (s *PackageDetails) SetPackageDescription(v string) *PackageDetails { s.PackageDescription = &v @@ -9374,6 +9748,48 @@ func (s *PackageSource) SetS3Key(v string) *PackageSource { return s } +// Details of a package version. +type PackageVersionHistory struct { + _ struct{} `type:"structure"` + + // A message associated with the version. + CommitMessage *string `type:"string"` + + // Timestamp which tells creation time of the package version. + CreatedAt *time.Time `type:"timestamp"` + + // Version of the package. + PackageVersion *string `type:"string"` +} + +// String returns the string representation +func (s PackageVersionHistory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PackageVersionHistory) GoString() string { + return s.String() +} + +// SetCommitMessage sets the CommitMessage field's value. +func (s *PackageVersionHistory) SetCommitMessage(v string) *PackageVersionHistory { + s.CommitMessage = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *PackageVersionHistory) SetCreatedAt(v time.Time) *PackageVersionHistory { + s.CreatedAt = &v + return s +} + +// SetPackageVersion sets the PackageVersion field's value. +func (s *PackageVersionHistory) SetPackageVersion(v string) *PackageVersionHistory { + s.PackageVersion = &v + return s +} + // Container for parameters to PurchaseReservedElasticsearchInstanceOffering type PurchaseReservedElasticsearchInstanceOfferingInput struct { _ struct{} `type:"structure"` @@ -10765,6 +11181,106 @@ func (s *UpdateElasticsearchDomainConfigOutput) SetDomainConfig(v *Elasticsearch return s } +// Container for request parameters to UpdatePackage operation. +type UpdatePackageInput struct { + _ struct{} `type:"structure"` + + // An info message for the new version which will be shown as part of GetPackageVersionHistoryResponse. + CommitMessage *string `type:"string"` + + // New description of the package. + PackageDescription *string `type:"string"` + + // Unique identifier for the package. 
+ // + // PackageID is a required field + PackageID *string `type:"string" required:"true"` + + // The S3 location for importing the package specified as S3BucketName and S3Key + // + // PackageSource is a required field + PackageSource *PackageSource `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdatePackageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePackageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdatePackageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdatePackageInput"} + if s.PackageID == nil { + invalidParams.Add(request.NewErrParamRequired("PackageID")) + } + if s.PackageSource == nil { + invalidParams.Add(request.NewErrParamRequired("PackageSource")) + } + if s.PackageSource != nil { + if err := s.PackageSource.Validate(); err != nil { + invalidParams.AddNested("PackageSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommitMessage sets the CommitMessage field's value. +func (s *UpdatePackageInput) SetCommitMessage(v string) *UpdatePackageInput { + s.CommitMessage = &v + return s +} + +// SetPackageDescription sets the PackageDescription field's value. +func (s *UpdatePackageInput) SetPackageDescription(v string) *UpdatePackageInput { + s.PackageDescription = &v + return s +} + +// SetPackageID sets the PackageID field's value. +func (s *UpdatePackageInput) SetPackageID(v string) *UpdatePackageInput { + s.PackageID = &v + return s +} + +// SetPackageSource sets the PackageSource field's value. +func (s *UpdatePackageInput) SetPackageSource(v *PackageSource) *UpdatePackageInput { + s.PackageSource = v + return s +} + +// Container for response returned by UpdatePackage operation. +type UpdatePackageOutput struct { + _ struct{} `type:"structure"` + + // Information about the package PackageDetails. + PackageDetails *PackageDetails `type:"structure"` +} + +// String returns the string representation +func (s UpdatePackageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdatePackageOutput) GoString() string { + return s.String() +} + +// SetPackageDetails sets the PackageDetails field's value. +func (s *UpdatePackageOutput) SetPackageDetails(v *PackageDetails) *UpdatePackageOutput { + s.PackageDetails = v + return s +} + // Container for request parameters to UpgradeElasticsearchDomain operation. 
type UpgradeElasticsearchDomainInput struct { _ struct{} `type:"structure"` diff --git a/service/elasticsearchservice/elasticsearchserviceiface/interface.go b/service/elasticsearchservice/elasticsearchserviceiface/interface.go index a2c497f877a..8fa8cc1bbc1 100644 --- a/service/elasticsearchservice/elasticsearchserviceiface/interface.go +++ b/service/elasticsearchservice/elasticsearchserviceiface/interface.go @@ -167,6 +167,13 @@ type ElasticsearchServiceAPI interface { GetCompatibleElasticsearchVersionsWithContext(aws.Context, *elasticsearchservice.GetCompatibleElasticsearchVersionsInput, ...request.Option) (*elasticsearchservice.GetCompatibleElasticsearchVersionsOutput, error) GetCompatibleElasticsearchVersionsRequest(*elasticsearchservice.GetCompatibleElasticsearchVersionsInput) (*request.Request, *elasticsearchservice.GetCompatibleElasticsearchVersionsOutput) + GetPackageVersionHistory(*elasticsearchservice.GetPackageVersionHistoryInput) (*elasticsearchservice.GetPackageVersionHistoryOutput, error) + GetPackageVersionHistoryWithContext(aws.Context, *elasticsearchservice.GetPackageVersionHistoryInput, ...request.Option) (*elasticsearchservice.GetPackageVersionHistoryOutput, error) + GetPackageVersionHistoryRequest(*elasticsearchservice.GetPackageVersionHistoryInput) (*request.Request, *elasticsearchservice.GetPackageVersionHistoryOutput) + + GetPackageVersionHistoryPages(*elasticsearchservice.GetPackageVersionHistoryInput, func(*elasticsearchservice.GetPackageVersionHistoryOutput, bool) bool) error + GetPackageVersionHistoryPagesWithContext(aws.Context, *elasticsearchservice.GetPackageVersionHistoryInput, func(*elasticsearchservice.GetPackageVersionHistoryOutput, bool) bool, ...request.Option) error + GetUpgradeHistory(*elasticsearchservice.GetUpgradeHistoryInput) (*elasticsearchservice.GetUpgradeHistoryOutput, error) GetUpgradeHistoryWithContext(aws.Context, *elasticsearchservice.GetUpgradeHistoryInput, ...request.Option) (*elasticsearchservice.GetUpgradeHistoryOutput, error) GetUpgradeHistoryRequest(*elasticsearchservice.GetUpgradeHistoryInput) (*request.Request, *elasticsearchservice.GetUpgradeHistoryOutput) @@ -234,6 +241,10 @@ type ElasticsearchServiceAPI interface { UpdateElasticsearchDomainConfigWithContext(aws.Context, *elasticsearchservice.UpdateElasticsearchDomainConfigInput, ...request.Option) (*elasticsearchservice.UpdateElasticsearchDomainConfigOutput, error) UpdateElasticsearchDomainConfigRequest(*elasticsearchservice.UpdateElasticsearchDomainConfigInput) (*request.Request, *elasticsearchservice.UpdateElasticsearchDomainConfigOutput) + UpdatePackage(*elasticsearchservice.UpdatePackageInput) (*elasticsearchservice.UpdatePackageOutput, error) + UpdatePackageWithContext(aws.Context, *elasticsearchservice.UpdatePackageInput, ...request.Option) (*elasticsearchservice.UpdatePackageOutput, error) + UpdatePackageRequest(*elasticsearchservice.UpdatePackageInput) (*request.Request, *elasticsearchservice.UpdatePackageOutput) + UpgradeElasticsearchDomain(*elasticsearchservice.UpgradeElasticsearchDomainInput) (*elasticsearchservice.UpgradeElasticsearchDomainOutput, error) UpgradeElasticsearchDomainWithContext(aws.Context, *elasticsearchservice.UpgradeElasticsearchDomainInput, ...request.Option) (*elasticsearchservice.UpgradeElasticsearchDomainOutput, error) UpgradeElasticsearchDomainRequest(*elasticsearchservice.UpgradeElasticsearchDomainInput) (*request.Request, *elasticsearchservice.UpgradeElasticsearchDomainOutput) diff --git a/service/fsx/api.go b/service/fsx/api.go index 
41919d1cd99..d147973274f 100644 --- a/service/fsx/api.go +++ b/service/fsx/api.go @@ -13,6 +13,103 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) +const opAssociateFileSystemAliases = "AssociateFileSystemAliases" + +// AssociateFileSystemAliasesRequest generates a "aws/request.Request" representing the +// client's request for the AssociateFileSystemAliases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateFileSystemAliases for more information on using the AssociateFileSystemAliases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateFileSystemAliasesRequest method. +// req, resp := client.AssociateFileSystemAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/AssociateFileSystemAliases +func (c *FSx) AssociateFileSystemAliasesRequest(input *AssociateFileSystemAliasesInput) (req *request.Request, output *AssociateFileSystemAliasesOutput) { + op := &request.Operation{ + Name: opAssociateFileSystemAliases, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateFileSystemAliasesInput{} + } + + output = &AssociateFileSystemAliasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateFileSystemAliases API operation for Amazon FSx. +// +// Use this action to associate one or more Domain Name Server (DNS) aliases +// with an existing Amazon FSx for Windows File Server file system. A file systen +// can have a maximum of 50 DNS aliases associated with it at any one time. +// If you try to associate a DNS alias that is already associated with the file +// system, FSx takes no action on that alias in the request. For more information, +// see Working with DNS Aliases (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) +// and Walkthrough 5: Using DNS aliases to access your file system (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/walkthrough05-file-system-custom-CNAME.html), +// including additional steps you must take to be able to access your file system +// using a DNS alias. +// +// The system response shows the DNS aliases that Amazon FSx is attempting to +// associate with the file system. Use the API operation to monitor the status +// of the aliases Amazon FSx is associating with the file system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation AssociateFileSystemAliases for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/AssociateFileSystemAliases +func (c *FSx) AssociateFileSystemAliases(input *AssociateFileSystemAliasesInput) (*AssociateFileSystemAliasesOutput, error) { + req, out := c.AssociateFileSystemAliasesRequest(input) + return out, req.Send() +} + +// AssociateFileSystemAliasesWithContext is the same as AssociateFileSystemAliases with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateFileSystemAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) AssociateFileSystemAliasesWithContext(ctx aws.Context, input *AssociateFileSystemAliasesInput, opts ...request.Option) (*AssociateFileSystemAliasesOutput, error) { + req, out := c.AssociateFileSystemAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCancelDataRepositoryTask = "CancelDataRepositoryTask" // CancelDataRepositoryTaskRequest generates a "aws/request.Request" representing the @@ -1187,6 +1284,153 @@ func (c *FSx) DescribeDataRepositoryTasksPagesWithContext(ctx aws.Context, input return p.Err() } +const opDescribeFileSystemAliases = "DescribeFileSystemAliases" + +// DescribeFileSystemAliasesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFileSystemAliases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFileSystemAliases for more information on using the DescribeFileSystemAliases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFileSystemAliasesRequest method. +// req, resp := client.DescribeFileSystemAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeFileSystemAliases +func (c *FSx) DescribeFileSystemAliasesRequest(input *DescribeFileSystemAliasesInput) (req *request.Request, output *DescribeFileSystemAliasesOutput) { + op := &request.Operation{ + Name: opDescribeFileSystemAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeFileSystemAliasesInput{} + } + + output = &DescribeFileSystemAliasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFileSystemAliases API operation for Amazon FSx. +// +// Returns the DNS aliases that are associated with the specified Amazon FSx +// for Windows File Server file system. 
A history of all DNS aliases that have +// been associated with and disassociated from the file system is available +// in the list of AdministrativeAction provided in the DescribeFileSystems operation +// response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon FSx's +// API operation DescribeFileSystemAliases for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DescribeFileSystemAliases +func (c *FSx) DescribeFileSystemAliases(input *DescribeFileSystemAliasesInput) (*DescribeFileSystemAliasesOutput, error) { + req, out := c.DescribeFileSystemAliasesRequest(input) + return out, req.Send() +} + +// DescribeFileSystemAliasesWithContext is the same as DescribeFileSystemAliases with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFileSystemAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DescribeFileSystemAliasesWithContext(ctx aws.Context, input *DescribeFileSystemAliasesInput, opts ...request.Option) (*DescribeFileSystemAliasesOutput, error) { + req, out := c.DescribeFileSystemAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeFileSystemAliasesPages iterates over the pages of a DescribeFileSystemAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFileSystemAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFileSystemAliases operation. +// pageNum := 0 +// err := client.DescribeFileSystemAliasesPages(params, +// func(page *fsx.DescribeFileSystemAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *FSx) DescribeFileSystemAliasesPages(input *DescribeFileSystemAliasesInput, fn func(*DescribeFileSystemAliasesOutput, bool) bool) error { + return c.DescribeFileSystemAliasesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFileSystemAliasesPagesWithContext same as DescribeFileSystemAliasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *FSx) DescribeFileSystemAliasesPagesWithContext(ctx aws.Context, input *DescribeFileSystemAliasesInput, fn func(*DescribeFileSystemAliasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFileSystemAliasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFileSystemAliasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFileSystemAliasesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeFileSystems = "DescribeFileSystems" // DescribeFileSystemsRequest generates a "aws/request.Request" representing the @@ -1353,6 +1597,100 @@ func (c *FSx) DescribeFileSystemsPagesWithContext(ctx aws.Context, input *Descri return p.Err() } +const opDisassociateFileSystemAliases = "DisassociateFileSystemAliases" + +// DisassociateFileSystemAliasesRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateFileSystemAliases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateFileSystemAliases for more information on using the DisassociateFileSystemAliases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateFileSystemAliasesRequest method. +// req, resp := client.DisassociateFileSystemAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DisassociateFileSystemAliases +func (c *FSx) DisassociateFileSystemAliasesRequest(input *DisassociateFileSystemAliasesInput) (req *request.Request, output *DisassociateFileSystemAliasesOutput) { + op := &request.Operation{ + Name: opDisassociateFileSystemAliases, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateFileSystemAliasesInput{} + } + + output = &DisassociateFileSystemAliasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateFileSystemAliases API operation for Amazon FSx. +// +// Use this action to disassociate, or remove, one or more Domain Name Service +// (DNS) aliases from an Amazon FSx for Windows File Server file system. If +// you attempt to disassociate a DNS alias that is not associated with the file +// system, Amazon FSx responds with a 400 Bad Request. For more information, +// see Working with DNS Aliases (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html). +// +// The system generated response showing the DNS aliases that Amazon FSx is +// attempting to disassociate from the file system. Use the API operation to +// monitor the status of the aliases Amazon FSx is disassociating with the file +// system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon FSx's +// API operation DisassociateFileSystemAliases for usage and error information. +// +// Returned Error Types: +// * BadRequest +// A generic error indicating a failure with a client request. +// +// * FileSystemNotFound +// No Amazon FSx file systems were found based upon supplied parameters. +// +// * InternalServerError +// A generic error indicating a server-side failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/DisassociateFileSystemAliases +func (c *FSx) DisassociateFileSystemAliases(input *DisassociateFileSystemAliasesInput) (*DisassociateFileSystemAliasesOutput, error) { + req, out := c.DisassociateFileSystemAliasesRequest(input) + return out, req.Send() +} + +// DisassociateFileSystemAliasesWithContext is the same as DisassociateFileSystemAliases with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateFileSystemAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FSx) DisassociateFileSystemAliasesWithContext(ctx aws.Context, input *DisassociateFileSystemAliasesInput, opts ...request.Option) (*DisassociateFileSystemAliasesOutput, error) { + req, out := c.DisassociateFileSystemAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -1887,7 +2225,7 @@ type AdministrativeAction struct { // // * FILE_SYSTEM_UPDATE - A file system update administrative action initiated // by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI - // (update-file-system). A + // (update-file-system). // // * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase // a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION @@ -1896,12 +2234,19 @@ type AdministrativeAction struct { // progress using the ProgressPercent property. When STORAGE_OPTIMIZATION // completes successfully, the parent FILE_SYSTEM_UPDATE action status changes // to COMPLETED. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + // + // * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a + // new DNS alias with the file system. For more information, see . + // + // * FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate + // a DNS alias from the file system. For more information, see . AdministrativeActionType *string `type:"string" enum:"AdministrativeActionType"` // Provides information about a failed administrative action. FailureDetails *AdministrativeActionFailureDetails `type:"structure"` // Provides the percent complete of a STORAGE_OPTIMIZATION administrative action. + // Does not apply to any other administrative action type. ProgressPercent *int64 `type:"integer"` // Time that the administrative action request was received. @@ -1923,79 +2268,258 @@ type AdministrativeAction struct { // see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). 
Status *string `type:"string" enum:"Status"` - // Describes the target StorageCapacity or ThroughputCapacity value provided - // in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative - // actions. - TargetFileSystemValues *FileSystem `type:"structure"` + // Describes the target value for the administration action, provided in the + // UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative + // actions. + TargetFileSystemValues *FileSystem `type:"structure"` +} + +// String returns the string representation +func (s AdministrativeAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdministrativeAction) GoString() string { + return s.String() +} + +// SetAdministrativeActionType sets the AdministrativeActionType field's value. +func (s *AdministrativeAction) SetAdministrativeActionType(v string) *AdministrativeAction { + s.AdministrativeActionType = &v + return s +} + +// SetFailureDetails sets the FailureDetails field's value. +func (s *AdministrativeAction) SetFailureDetails(v *AdministrativeActionFailureDetails) *AdministrativeAction { + s.FailureDetails = v + return s +} + +// SetProgressPercent sets the ProgressPercent field's value. +func (s *AdministrativeAction) SetProgressPercent(v int64) *AdministrativeAction { + s.ProgressPercent = &v + return s +} + +// SetRequestTime sets the RequestTime field's value. +func (s *AdministrativeAction) SetRequestTime(v time.Time) *AdministrativeAction { + s.RequestTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AdministrativeAction) SetStatus(v string) *AdministrativeAction { + s.Status = &v + return s +} + +// SetTargetFileSystemValues sets the TargetFileSystemValues field's value. +func (s *AdministrativeAction) SetTargetFileSystemValues(v *FileSystem) *AdministrativeAction { + s.TargetFileSystemValues = v + return s +} + +// Provides information about a failed administrative action. +type AdministrativeActionFailureDetails struct { + _ struct{} `type:"structure"` + + // Error message providing details about the failed administrative action. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AdministrativeActionFailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdministrativeActionFailureDetails) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *AdministrativeActionFailureDetails) SetMessage(v string) *AdministrativeActionFailureDetails { + s.Message = &v + return s +} + +// A DNS alias that is associated with the file system. You can use a DNS alias +// to access a file system using user-defined DNS names, in addition to the +// default DNS name that Amazon FSx assigns to the file system. For more information, +// see DNS aliases (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) +// in the FSx for Windows File Server User Guide. +type Alias struct { + _ struct{} `type:"structure"` + + // Describes the state of the DNS alias. + // + // * AVAILABLE - The DNS alias is associated with an Amazon FSx file system. + // + // * CREATING - Amazon FSx is creating the DNS alias and associating it with + // the file system. + // + // * CREATE_FAILED - Amazon FSx was unable to associate the DNS alias with + // the file system. 
+ // + // * DELETING - Amazon FSx is disassociating the DNS alias from the file + // system and deleting it. + // + // * DELETE_FAILED - Amazon FSx was unable to disassocate the DNS alias from + // the file system. + Lifecycle *string `type:"string" enum:"AliasLifecycle"` + + // The name of the DNS alias. The alias name has to meet the following requirements: + // + // * Formatted as a fully-qualified domain name (FQDN), hostname.domain, + // for example, accounting.example.com. + // + // * Can contain alphanumeric characters and the hyphen (-). + // + // * Cannot start or end with a hyphen. + // + // * Can start with a numeric. + // + // For DNS names, Amazon FSx stores alphabetic characters as lowercase letters + // (a-z), regardless of how you specify them: as uppercase letters, lowercase + // letters, or the corresponding letters in escape codes. + Name *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s Alias) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alias) GoString() string { + return s.String() +} + +// SetLifecycle sets the Lifecycle field's value. +func (s *Alias) SetLifecycle(v string) *Alias { + s.Lifecycle = &v + return s +} + +// SetName sets the Name field's value. +func (s *Alias) SetName(v string) *Alias { + s.Name = &v + return s +} + +// The request object specifying one or more DNS alias names to associate with +// an Amazon FSx for Windows File Server file system. +type AssociateFileSystemAliasesInput struct { + _ struct{} `type:"structure"` + + // An array of one or more DNS alias names to associate with the file system. + // The alias name has to comply with the following formatting requirements: + // + // * Formatted as a fully-qualified domain name (FQDN), hostname.domain , + // for example, accounting.corp.example.com. + // + // * Can contain alphanumeric characters and the hyphen (-). + // + // * Cannot start or end with a hyphen. + // + // * Can start with a numeric. + // + // For DNS alias names, Amazon FSx stores alphabetic characters as lowercase + // letters (a-z), regardless of how you specify them: as uppercase letters, + // lowercase letters, or the corresponding letters in escape codes. + // + // Aliases is a required field + Aliases []*string `type:"list" required:"true"` + + // (Optional) An idempotency token for resource creation, in a string of up + // to 64 ASCII characters. This token is automatically filled on your behalf + // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // Specifies the file system with which you want to associate one or more DNS + // aliases. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` } // String returns the string representation -func (s AdministrativeAction) String() string { +func (s AssociateFileSystemAliasesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AdministrativeAction) GoString() string { +func (s AssociateFileSystemAliasesInput) GoString() string { return s.String() } -// SetAdministrativeActionType sets the AdministrativeActionType field's value. -func (s *AdministrativeAction) SetAdministrativeActionType(v string) *AdministrativeAction { - s.AdministrativeActionType = &v - return s -} - -// SetFailureDetails sets the FailureDetails field's value. 
-func (s *AdministrativeAction) SetFailureDetails(v *AdministrativeActionFailureDetails) *AdministrativeAction { - s.FailureDetails = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateFileSystemAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateFileSystemAliasesInput"} + if s.Aliases == nil { + invalidParams.Add(request.NewErrParamRequired("Aliases")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } -// SetProgressPercent sets the ProgressPercent field's value. -func (s *AdministrativeAction) SetProgressPercent(v int64) *AdministrativeAction { - s.ProgressPercent = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRequestTime sets the RequestTime field's value. -func (s *AdministrativeAction) SetRequestTime(v time.Time) *AdministrativeAction { - s.RequestTime = &v +// SetAliases sets the Aliases field's value. +func (s *AssociateFileSystemAliasesInput) SetAliases(v []*string) *AssociateFileSystemAliasesInput { + s.Aliases = v return s } -// SetStatus sets the Status field's value. -func (s *AdministrativeAction) SetStatus(v string) *AdministrativeAction { - s.Status = &v +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *AssociateFileSystemAliasesInput) SetClientRequestToken(v string) *AssociateFileSystemAliasesInput { + s.ClientRequestToken = &v return s } -// SetTargetFileSystemValues sets the TargetFileSystemValues field's value. -func (s *AdministrativeAction) SetTargetFileSystemValues(v *FileSystem) *AdministrativeAction { - s.TargetFileSystemValues = v +// SetFileSystemId sets the FileSystemId field's value. +func (s *AssociateFileSystemAliasesInput) SetFileSystemId(v string) *AssociateFileSystemAliasesInput { + s.FileSystemId = &v return s } -// Provides information about a failed administrative action. -type AdministrativeActionFailureDetails struct { +// The system generated response showing the DNS aliases that Amazon FSx is +// attempting to associate with the file system. Use the API operation to monitor +// the status of the aliases Amazon FSx is associating with the file system. +// It can take up to 2.5 minutes for the alias status to change from CREATING +// to AVAILABLE. +type AssociateFileSystemAliasesOutput struct { _ struct{} `type:"structure"` - // Error message providing details about the failure. - Message *string `min:"1" type:"string"` + // An array of the DNS aliases that Amazon FSx is associating with the file + // system. + Aliases []*Alias `type:"list"` } // String returns the string representation -func (s AdministrativeActionFailureDetails) String() string { +func (s AssociateFileSystemAliasesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AdministrativeActionFailureDetails) GoString() string { +func (s AssociateFileSystemAliasesOutput) GoString() string { return s.String() } -// SetMessage sets the Message field's value. -func (s *AdministrativeActionFailureDetails) SetMessage(v string) *AdministrativeActionFailureDetails { - s.Message = &v +// SetAliases sets the Aliases field's value. 
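// A minimal usage sketch (not part of the generated SDK source) of associating a
// DNS alias using the AssociateFileSystemAliases request and response shapes
// above. The file system ID and alias are placeholders; ClientRequestToken is
// filled automatically by the SDK if omitted, and the usual aws, session, and
// fsx imports are assumed.
//
//	svc := fsx.New(session.New())
//	out, err := svc.AssociateFileSystemAliases(&fsx.AssociateFileSystemAliasesInput{
//		FileSystemId: aws.String("fs-0123456789abcdef0"), // placeholder file system ID
//		Aliases:      []*string{aws.String("accounting.corp.example.com")},
//	})
//	if err != nil {
//		fmt.Println(err)
//	} else {
//		for _, a := range out.Aliases {
//			// Newly associated aliases are expected to report the CREATING lifecycle.
//			fmt.Println(aws.StringValue(a.Name), aws.StringValue(a.Lifecycle))
//		}
//	}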
+func (s *AssociateFileSystemAliasesOutput) SetAliases(v []*Alias) *AssociateFileSystemAliasesOutput { + s.Aliases = v return s } @@ -3487,6 +4011,36 @@ type CreateFileSystemWindowsConfiguration struct { // that the file system should join when it's created. ActiveDirectoryId *string `min:"12" type:"string"` + // An array of one or more DNS alias names that you want to associate with the + // Amazon FSx file system. Aliases allow you to use existing DNS names to access + // the data in your Amazon FSx file system. You can associate up to 50 aliases + // with a file system at any time. You can associate additional DNS aliases + // after you create the file system using the AssociateFileSystemAliases operation. + // You can remove DNS aliases from the file system after it is created using + // the DisassociateFileSystemAliases operation. You only need to specify the + // alias name in the request payload. + // + // For more information, see Working with DNS Aliases (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html) + // and Walkthrough 5: Using DNS aliases to access your file system (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/walkthrough05-file-system-custom-CNAME.html), + // including additional steps you must take to be able to access your file system + // using a DNS alias. + // + // An alias name has to meet the following requirements: + // + // * Formatted as a fully-qualified domain name (FQDN), hostname.domain, + // for example, accounting.example.com. + // + // * Can contain alphanumeric characters and the hyphen (-). + // + // * Cannot start or end with a hyphen. + // + // * Can start with a numeric. + // + // For DNS alias names, Amazon FSx stores alphabetic characters as lowercase + // letters (a-z), regardless of how you specify them: as uppercase letters, + // lowercase letters, or the corresponding letters in escape codes. + Aliases []*string `type:"list"` + // The number of days to retain automatic backups. The default is to retain // backups for 7 days. Setting this value to 0 disables the creation of automatic // backups. The maximum retention period for backups is 90 days. @@ -3596,6 +4150,12 @@ func (s *CreateFileSystemWindowsConfiguration) SetActiveDirectoryId(v string) *C return s } +// SetAliases sets the Aliases field's value. +func (s *CreateFileSystemWindowsConfiguration) SetAliases(v []*string) *CreateFileSystemWindowsConfiguration { + s.Aliases = v + return s +} + // SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. func (s *CreateFileSystemWindowsConfiguration) SetAutomaticBackupRetentionDays(v int64) *CreateFileSystemWindowsConfiguration { s.AutomaticBackupRetentionDays = &v @@ -4899,6 +5459,126 @@ func (s *DescribeDataRepositoryTasksOutput) SetNextToken(v string) *DescribeData return s } +// The request object for DescribeFileSystemAliases operation. +type DescribeFileSystemAliasesInput struct { + _ struct{} `type:"structure"` + + // (Optional) An idempotency token for resource creation, in a string of up + // to 64 ASCII characters. This token is automatically filled on your behalf + // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The ID of the file system to return the associated DNS aliases for (String). + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // Maximum number of DNS aliases to return in the response (integer). 
This parameter + // value must be greater than 0. The number of items that Amazon FSx returns + // is the minimum of the MaxResults parameter specified in the request and the + // service's internal maximum number of items per page. + MaxResults *int64 `min:"1" type:"integer"` + + // Opaque pagination token returned from a previous DescribeFileSystemAliases + // operation (String). If a token is included in the request, the action continues + // the list from where the previous returning call left off. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFileSystemAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFileSystemAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFileSystemAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFileSystemAliasesInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *DescribeFileSystemAliasesInput) SetClientRequestToken(v string) *DescribeFileSystemAliasesInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *DescribeFileSystemAliasesInput) SetFileSystemId(v string) *DescribeFileSystemAliasesInput { + s.FileSystemId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFileSystemAliasesInput) SetMaxResults(v int64) *DescribeFileSystemAliasesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFileSystemAliasesInput) SetNextToken(v string) *DescribeFileSystemAliasesInput { + s.NextToken = &v + return s +} + +// The response object for DescribeFileSystemAliases operation. +type DescribeFileSystemAliasesOutput struct { + _ struct{} `type:"structure"` + + // An array of one or more DNS aliases currently associated with the specified + // file system. + Aliases []*Alias `type:"list"` + + // Present if there are more DNS aliases than returned in the response (String). + // You can use the NextToken value in a later request to fetch additional descriptions. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFileSystemAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFileSystemAliasesOutput) GoString() string { + return s.String() +} + +// SetAliases sets the Aliases field's value. +func (s *DescribeFileSystemAliasesOutput) SetAliases(v []*Alias) *DescribeFileSystemAliasesOutput { + s.Aliases = v + return s +} + +// SetNextToken sets the NextToken field's value. 
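// A minimal usage sketch (not part of the generated SDK source) of listing a file
// system's DNS aliases with the paginated DescribeFileSystemAliases operation
// described above. The file system ID is a placeholder; the usual aws, session,
// and fsx imports are assumed.
//
//	svc := fsx.New(session.New())
//	input := &fsx.DescribeFileSystemAliasesInput{
//		FileSystemId: aws.String("fs-0123456789abcdef0"), // placeholder file system ID
//		MaxResults:   aws.Int64(10),
//	}
//	err := svc.DescribeFileSystemAliasesPages(input,
//		func(page *fsx.DescribeFileSystemAliasesOutput, lastPage bool) bool {
//			for _, a := range page.Aliases {
//				fmt.Println(aws.StringValue(a.Name), aws.StringValue(a.Lifecycle))
//			}
//			return true // continue until the last page
//		})
//	if err != nil {
//		fmt.Println(err)
//	}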
+func (s *DescribeFileSystemAliasesOutput) SetNextToken(v string) *DescribeFileSystemAliasesOutput { + s.NextToken = &v + return s +} + // The request object for DescribeFileSystems operation. type DescribeFileSystemsInput struct { _ struct{} `type:"structure"` @@ -4996,6 +5676,105 @@ func (s *DescribeFileSystemsOutput) SetNextToken(v string) *DescribeFileSystemsO return s } +// The request object of DNS aliases to disassociate from an Amazon FSx for +// Windows File Server file system. +type DisassociateFileSystemAliasesInput struct { + _ struct{} `type:"structure"` + + // An array of one or more DNS alias names to disassociate, or remove, from + // the file system. + // + // Aliases is a required field + Aliases []*string `type:"list" required:"true"` + + // (Optional) An idempotency token for resource creation, in a string of up + // to 64 ASCII characters. This token is automatically filled on your behalf + // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // Specifies the file system from which to disassociate the DNS aliases. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateFileSystemAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateFileSystemAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateFileSystemAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateFileSystemAliasesInput"} + if s.Aliases == nil { + invalidParams.Add(request.NewErrParamRequired("Aliases")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliases sets the Aliases field's value. +func (s *DisassociateFileSystemAliasesInput) SetAliases(v []*string) *DisassociateFileSystemAliasesInput { + s.Aliases = v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *DisassociateFileSystemAliasesInput) SetClientRequestToken(v string) *DisassociateFileSystemAliasesInput { + s.ClientRequestToken = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *DisassociateFileSystemAliasesInput) SetFileSystemId(v string) *DisassociateFileSystemAliasesInput { + s.FileSystemId = &v + return s +} + +// The system generated response showing the DNS aliases that Amazon FSx is +// attempting to disassociate from the file system. Use the API operation to +// monitor the status of the aliases Amazon FSx is removing from the file system. +type DisassociateFileSystemAliasesOutput struct { + _ struct{} `type:"structure"` + + // An array of one or more DNS aliases that Amazon FSx is attempting to disassociate + // from the file system. 
+ Aliases []*Alias `type:"list"` +} + +// String returns the string representation +func (s DisassociateFileSystemAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateFileSystemAliasesOutput) GoString() string { + return s.String() +} + +// SetAliases sets the Aliases field's value. +func (s *DisassociateFileSystemAliasesOutput) SetAliases(v []*Alias) *DisassociateFileSystemAliasesOutput { + s.Aliases = v + return s +} + // A description of a specific Amazon FSx file system. type FileSystem struct { _ struct{} `type:"structure"` @@ -7142,6 +7921,17 @@ type WindowsFileSystemConfiguration struct { // system should join when it's created. ActiveDirectoryId *string `min:"12" type:"string"` + // An array of one or more DNS aliases that are currently associated with the + // Amazon FSx file system. Aliases allow you to use existing DNS names to access + // the data in your Amazon FSx file system. You can associate up to 50 aliases + // with a file system at any time. You can associate additional DNS aliases + // after you create the file system using the AssociateFileSystemAliases operation. + // You can remove DNS aliases from the file system after it is created using + // the DisassociateFileSystemAliases operation. You only need to specify the + // alias name in the request payload. For more information, see DNS aliases + // (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-dns-aliases.html). + Aliases []*Alias `type:"list"` + // The number of days to retain automatic backups. Setting this to 0 disables // automatic backups. You can retain automatic backups for a maximum of 90 days. AutomaticBackupRetentionDays *int64 `type:"integer"` @@ -7238,6 +8028,12 @@ func (s *WindowsFileSystemConfiguration) SetActiveDirectoryId(v string) *Windows return s } +// SetAliases sets the Aliases field's value. +func (s *WindowsFileSystemConfiguration) SetAliases(v []*Alias) *WindowsFileSystemConfiguration { + s.Aliases = v + return s +} + // SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. func (s *WindowsFileSystemConfiguration) SetAutomaticBackupRetentionDays(v int64) *WindowsFileSystemConfiguration { s.AutomaticBackupRetentionDays = &v @@ -7338,7 +8134,7 @@ func ActiveDirectoryErrorType_Values() []string { // // * FILE_SYSTEM_UPDATE - A file system update administrative action initiated // by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI -// (update-file-system). A +// (update-file-system). // // * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase // a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION @@ -7347,12 +8143,24 @@ func ActiveDirectoryErrorType_Values() []string { // progress using the ProgressPercent property. When STORAGE_OPTIMIZATION // completes successfully, the parent FILE_SYSTEM_UPDATE action status changes // to COMPLETED. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). +// +// * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a +// new DNS alias with the file system. For more information, see . +// +// * FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate +// a DNS alias from the file system. For more information, see . 
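// A minimal usage sketch (not part of the generated SDK source) of removing a DNS
// alias with DisassociateFileSystemAliases; the returned aliases should report
// the DELETING lifecycle while Amazon FSx removes them. Identifiers are
// placeholders, and the usual aws, session, and fsx imports are assumed.
//
//	svc := fsx.New(session.New())
//	out, err := svc.DisassociateFileSystemAliases(&fsx.DisassociateFileSystemAliasesInput{
//		FileSystemId: aws.String("fs-0123456789abcdef0"), // placeholder file system ID
//		Aliases:      []*string{aws.String("accounting.corp.example.com")},
//	})
//	if err != nil {
//		fmt.Println(err)
//	} else {
//		fmt.Println(out.Aliases)
//	}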
const ( // AdministrativeActionTypeFileSystemUpdate is a AdministrativeActionType enum value AdministrativeActionTypeFileSystemUpdate = "FILE_SYSTEM_UPDATE" // AdministrativeActionTypeStorageOptimization is a AdministrativeActionType enum value AdministrativeActionTypeStorageOptimization = "STORAGE_OPTIMIZATION" + + // AdministrativeActionTypeFileSystemAliasAssociation is a AdministrativeActionType enum value + AdministrativeActionTypeFileSystemAliasAssociation = "FILE_SYSTEM_ALIAS_ASSOCIATION" + + // AdministrativeActionTypeFileSystemAliasDisassociation is a AdministrativeActionType enum value + AdministrativeActionTypeFileSystemAliasDisassociation = "FILE_SYSTEM_ALIAS_DISASSOCIATION" ) // AdministrativeActionType_Values returns all elements of the AdministrativeActionType enum @@ -7360,6 +8168,36 @@ func AdministrativeActionType_Values() []string { return []string{ AdministrativeActionTypeFileSystemUpdate, AdministrativeActionTypeStorageOptimization, + AdministrativeActionTypeFileSystemAliasAssociation, + AdministrativeActionTypeFileSystemAliasDisassociation, + } +} + +const ( + // AliasLifecycleAvailable is a AliasLifecycle enum value + AliasLifecycleAvailable = "AVAILABLE" + + // AliasLifecycleCreating is a AliasLifecycle enum value + AliasLifecycleCreating = "CREATING" + + // AliasLifecycleDeleting is a AliasLifecycle enum value + AliasLifecycleDeleting = "DELETING" + + // AliasLifecycleCreateFailed is a AliasLifecycle enum value + AliasLifecycleCreateFailed = "CREATE_FAILED" + + // AliasLifecycleDeleteFailed is a AliasLifecycle enum value + AliasLifecycleDeleteFailed = "DELETE_FAILED" +) + +// AliasLifecycle_Values returns all elements of the AliasLifecycle enum +func AliasLifecycle_Values() []string { + return []string{ + AliasLifecycleAvailable, + AliasLifecycleCreating, + AliasLifecycleDeleting, + AliasLifecycleCreateFailed, + AliasLifecycleDeleteFailed, } } @@ -7430,6 +8268,9 @@ const ( // BackupTypeUserInitiated is a BackupType enum value BackupTypeUserInitiated = "USER_INITIATED" + + // BackupTypeAwsBackup is a BackupType enum value + BackupTypeAwsBackup = "AWS_BACKUP" ) // BackupType_Values returns all elements of the BackupType enum @@ -7437,6 +8278,7 @@ func BackupType_Values() []string { return []string{ BackupTypeAutomatic, BackupTypeUserInitiated, + BackupTypeAwsBackup, } } diff --git a/service/fsx/examples_test.go b/service/fsx/examples_test.go index daa6745599f..633bccd6da9 100644 --- a/service/fsx/examples_test.go +++ b/service/fsx/examples_test.go @@ -74,17 +74,18 @@ func ExampleFSx_CreateBackup_shared00() { // To create a new file system // -// This operation creates a new file system. +// This operation creates a new Amazon FSx for Windows File Server file system. 
func ExampleFSx_CreateFileSystem_shared00() { svc := fsx.New(session.New()) input := &fsx.CreateFileSystemInput{ ClientRequestToken: aws.String("a8ca07e4-61ec-4399-99f4-19853801bcd5"), FileSystemType: aws.String("WINDOWS"), - KmsKeyId: aws.String("arn:aws:kms:us-east-1:012345678912:key/0ff3ea8d-130e-4133-877f-93908b6fdbd6"), + KmsKeyId: aws.String("arn:aws:kms:us-east-1:012345678912:key/1111abcd-2222-3333-4444-55556666eeff"), SecurityGroupIds: []*string{ aws.String("sg-edcd9784"), }, - StorageCapacity: aws.Int64(300), + StorageCapacity: aws.Int64(3200), + StorageType: aws.String("HDD"), SubnetIds: []*string{ aws.String("subnet-1234abcd"), }, @@ -95,10 +96,13 @@ func ExampleFSx_CreateFileSystem_shared00() { }, }, WindowsConfiguration: &fsx.CreateFileSystemWindowsConfiguration{ - ActiveDirectoryId: aws.String("d-1234abcd12"), + ActiveDirectoryId: aws.String("d-1234abcd12"), + Aliases: []*string{ + aws.String("accounting.corp.example.com"), + }, AutomaticBackupRetentionDays: aws.Int64(30), DailyAutomaticBackupStartTime: aws.String("05:00"), - ThroughputCapacity: aws.Int64(8), + ThroughputCapacity: aws.Int64(32), WeeklyMaintenanceStartTime: aws.String("1:05:00"), }, } diff --git a/service/fsx/fsxiface/interface.go b/service/fsx/fsxiface/interface.go index 0cd98a2526b..5879988ebef 100644 --- a/service/fsx/fsxiface/interface.go +++ b/service/fsx/fsxiface/interface.go @@ -26,7 +26,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // Amazon FSx. // func myFunc(svc fsxiface.FSxAPI) bool { -// // Make svc.CancelDataRepositoryTask request +// // Make svc.AssociateFileSystemAliases request // } // // func main() { @@ -42,7 +42,7 @@ import ( // type mockFSxClient struct { // fsxiface.FSxAPI // } -// func (m *mockFSxClient) CancelDataRepositoryTask(input *fsx.CancelDataRepositoryTaskInput) (*fsx.CancelDataRepositoryTaskOutput, error) { +// func (m *mockFSxClient) AssociateFileSystemAliases(input *fsx.AssociateFileSystemAliasesInput) (*fsx.AssociateFileSystemAliasesOutput, error) { // // mock response/functionality // } // @@ -60,6 +60,10 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. 
type FSxAPI interface { + AssociateFileSystemAliases(*fsx.AssociateFileSystemAliasesInput) (*fsx.AssociateFileSystemAliasesOutput, error) + AssociateFileSystemAliasesWithContext(aws.Context, *fsx.AssociateFileSystemAliasesInput, ...request.Option) (*fsx.AssociateFileSystemAliasesOutput, error) + AssociateFileSystemAliasesRequest(*fsx.AssociateFileSystemAliasesInput) (*request.Request, *fsx.AssociateFileSystemAliasesOutput) + CancelDataRepositoryTask(*fsx.CancelDataRepositoryTaskInput) (*fsx.CancelDataRepositoryTaskOutput, error) CancelDataRepositoryTaskWithContext(aws.Context, *fsx.CancelDataRepositoryTaskInput, ...request.Option) (*fsx.CancelDataRepositoryTaskOutput, error) CancelDataRepositoryTaskRequest(*fsx.CancelDataRepositoryTaskInput) (*request.Request, *fsx.CancelDataRepositoryTaskOutput) @@ -102,6 +106,13 @@ type FSxAPI interface { DescribeDataRepositoryTasksPages(*fsx.DescribeDataRepositoryTasksInput, func(*fsx.DescribeDataRepositoryTasksOutput, bool) bool) error DescribeDataRepositoryTasksPagesWithContext(aws.Context, *fsx.DescribeDataRepositoryTasksInput, func(*fsx.DescribeDataRepositoryTasksOutput, bool) bool, ...request.Option) error + DescribeFileSystemAliases(*fsx.DescribeFileSystemAliasesInput) (*fsx.DescribeFileSystemAliasesOutput, error) + DescribeFileSystemAliasesWithContext(aws.Context, *fsx.DescribeFileSystemAliasesInput, ...request.Option) (*fsx.DescribeFileSystemAliasesOutput, error) + DescribeFileSystemAliasesRequest(*fsx.DescribeFileSystemAliasesInput) (*request.Request, *fsx.DescribeFileSystemAliasesOutput) + + DescribeFileSystemAliasesPages(*fsx.DescribeFileSystemAliasesInput, func(*fsx.DescribeFileSystemAliasesOutput, bool) bool) error + DescribeFileSystemAliasesPagesWithContext(aws.Context, *fsx.DescribeFileSystemAliasesInput, func(*fsx.DescribeFileSystemAliasesOutput, bool) bool, ...request.Option) error + DescribeFileSystems(*fsx.DescribeFileSystemsInput) (*fsx.DescribeFileSystemsOutput, error) DescribeFileSystemsWithContext(aws.Context, *fsx.DescribeFileSystemsInput, ...request.Option) (*fsx.DescribeFileSystemsOutput, error) DescribeFileSystemsRequest(*fsx.DescribeFileSystemsInput) (*request.Request, *fsx.DescribeFileSystemsOutput) @@ -109,6 +120,10 @@ type FSxAPI interface { DescribeFileSystemsPages(*fsx.DescribeFileSystemsInput, func(*fsx.DescribeFileSystemsOutput, bool) bool) error DescribeFileSystemsPagesWithContext(aws.Context, *fsx.DescribeFileSystemsInput, func(*fsx.DescribeFileSystemsOutput, bool) bool, ...request.Option) error + DisassociateFileSystemAliases(*fsx.DisassociateFileSystemAliasesInput) (*fsx.DisassociateFileSystemAliasesOutput, error) + DisassociateFileSystemAliasesWithContext(aws.Context, *fsx.DisassociateFileSystemAliasesInput, ...request.Option) (*fsx.DisassociateFileSystemAliasesOutput, error) + DisassociateFileSystemAliasesRequest(*fsx.DisassociateFileSystemAliasesInput) (*request.Request, *fsx.DisassociateFileSystemAliasesOutput) + ListTagsForResource(*fsx.ListTagsForResourceInput) (*fsx.ListTagsForResourceOutput, error) ListTagsForResourceWithContext(aws.Context, *fsx.ListTagsForResourceInput, ...request.Option) (*fsx.ListTagsForResourceOutput, error) ListTagsForResourceRequest(*fsx.ListTagsForResourceInput) (*request.Request, *fsx.ListTagsForResourceOutput) diff --git a/service/iotanalytics/api.go b/service/iotanalytics/api.go index a0c0e800da6..b5b157f5e06 100644 --- a/service/iotanalytics/api.go +++ b/service/iotanalytics/api.go @@ -335,11 +335,11 @@ func (c *IoTAnalytics) CreateDatasetRequest(input *CreateDatasetInput) (req 
*req // CreateDataset API operation for AWS IoT Analytics. // -// Creates a data set. A data set stores data retrieved from a data store by -// applying a "queryAction" (a SQL query) or a "containerAction" (executing -// a containerized application). This operation creates the skeleton of a data -// set. The data set can be populated manually by calling "CreateDatasetContent" -// or automatically according to a "trigger" you specify. +// Creates a dataset. A dataset stores data retrieved from a data store by applying +// a queryAction (a SQL query) or a containerAction (executing a containerized +// application). This operation creates the skeleton of a dataset. The dataset +// can be populated manually by calling CreateDatasetContent or automatically +// according to a trigger you specify. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -433,8 +433,8 @@ func (c *IoTAnalytics) CreateDatasetContentRequest(input *CreateDatasetContentIn // CreateDatasetContent API operation for AWS IoT Analytics. // -// Creates the content of a data set by applying a "queryAction" (a SQL query) -// or a "containerAction" (executing a containerized application). +// Creates the content of a data set by applying a queryAction (a SQL query) +// or a containerAction (executing a containerized application). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -809,10 +809,10 @@ func (c *IoTAnalytics) DeleteDatasetRequest(input *DeleteDatasetInput) (req *req // DeleteDataset API operation for AWS IoT Analytics. // -// Deletes the specified data set. +// Deletes the specified dataset. // -// You do not have to delete the content of the data set before you perform -// this operation. +// You do not have to delete the content of the dataset before you perform this +// operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -904,7 +904,7 @@ func (c *IoTAnalytics) DeleteDatasetContentRequest(input *DeleteDatasetContentIn // DeleteDatasetContent API operation for AWS IoT Analytics. // -// Deletes the content of the specified data set. +// Deletes the content of the specified dataset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1270,7 +1270,7 @@ func (c *IoTAnalytics) DescribeDatasetRequest(input *DescribeDatasetInput) (req // DescribeDataset API operation for AWS IoT Analytics. // -// Retrieves information about a data set. +// Retrieves information about a dataset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1634,7 +1634,7 @@ func (c *IoTAnalytics) GetDatasetContentRequest(input *GetDatasetContentInput) ( // GetDatasetContent API operation for AWS IoT Analytics. // -// Retrieves the contents of a data set as pre-signed URIs. +// Retrieves the contents of a data set as presigned URIs. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2458,7 +2458,7 @@ func (c *IoTAnalytics) ListTagsForResourceRequest(input *ListTagsForResourceInpu // ListTagsForResource API operation for AWS IoT Analytics. // -// Lists the tags (metadata) which you have assigned to the resource. +// Lists the tags (metadata) that you have assigned to the resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2555,11 +2555,10 @@ func (c *IoTAnalytics) PutLoggingOptionsRequest(input *PutLoggingOptionsInput) ( // // Sets or updates the AWS IoT Analytics logging options. // -// Note that if you update the value of any loggingOptions field, it takes up -// to one minute for the change to take effect. Also, if you change the policy -// attached to the role you specified in the roleArn field (for example, to -// correct an invalid policy) it takes up to 5 minutes for that change to take -// effect. +// If you update the value of any loggingOptions field, it takes up to one minute +// for the change to take effect. Also, if you change the policy attached to +// the role you specified in the roleArn field (for example, to correct an invalid +// policy), it takes up to five minutes for that change to take effect. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2922,7 +2921,7 @@ func (c *IoTAnalytics) TagResourceRequest(input *TagResourceInput) (req *request // TagResource API operation for AWS IoT Analytics. // -// Adds to or modifies the tags of the given resource. Tags are metadata which +// Adds to or modifies the tags of the given resource. Tags are metadata that // can be used to manage a resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3446,16 +3445,16 @@ func (c *IoTAnalytics) UpdatePipelineWithContext(ctx aws.Context, input *UpdateP type AddAttributesActivity struct { _ struct{} `type:"structure"` - // A list of 1-50 "AttributeNameMapping" objects that map an existing attribute + // A list of 1-50 AttributeNameMapping objects that map an existing attribute // to a new attribute. // // The existing attributes remain in the message, so if you want to remove the - // originals, use "RemoveAttributeActivity". + // originals, use RemoveAttributeActivity. // // Attributes is a required field Attributes map[string]*string `locationName:"attributes" min:"1" type:"map" required:"true"` - // The name of the 'addAttributes' activity. + // The name of the addAttributes activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -3527,8 +3526,8 @@ type BatchPutMessageErrorEntry struct { // The message associated with the error. ErrorMessage *string `locationName:"errorMessage" type:"string"` - // The ID of the message that caused the error. (See the value corresponding - // to the "messageId" key in the message object.) + // The ID of the message that caused the error. See the value corresponding + // to the messageId key in the message object. 
MessageId *string `locationName:"messageId" min:"1" type:"string"` } @@ -3568,13 +3567,12 @@ type BatchPutMessageInput struct { // ChannelName is a required field ChannelName *string `locationName:"channelName" min:"1" type:"string" required:"true"` - // The list of messages to be sent. Each message has format: '{ "messageId": - // "string", "payload": "string"}'. + // The list of messages to be sent. Each message has the format: { "messageId": + // "string", "payload": "string"}. // - // Note that the field names of message payloads (data) that you send to AWS - // IoT Analytics: + // The field names of message payloads (data) that you send to AWS IoT Analytics: // - // * Must contain only alphanumeric characters and undescores (_); no other + // * Must contain only alphanumeric characters and undescores (_). No other // special characters are allowed. // // * Must begin with an alphabetic character or single underscore (_). @@ -3583,9 +3581,9 @@ type BatchPutMessageInput struct { // // * In regular expression terms: "^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$". // - // * Cannot be greater than 255 characters. + // * Cannot be more than 255 characters. // - // * Are case-insensitive. (Fields named "foo" and "FOO" in the same payload + // * Are case insensitive. (Fields named foo and FOO in the same payload // are considered duplicates.) // // For example, {"temp_01": 29} or {"_temp_01": 29} are valid, but {"temp-01": @@ -3677,7 +3675,7 @@ type CancelPipelineReprocessingInput struct { // PipelineName is a required field PipelineName *string `location:"uri" locationName:"pipelineName" min:"1" type:"string" required:"true"` - // The ID of the reprocessing task (returned by "StartPipelineReprocessing"). + // The ID of the reprocessing task (returned by StartPipelineReprocessing). // // ReprocessingId is a required field ReprocessingId *string `location:"uri" locationName:"reprocessingId" type:"string" required:"true"` @@ -3752,6 +3750,15 @@ type Channel struct { // When the channel was created. CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + // The last time when a new message arrived in the channel. + // + // AWS IoT Analytics updates this value at most once per minute for one channel. + // Hence, the lastMessageArrivalTime value is an approximation. + // + // This feature only applies to messages that arrived in the data store after + // October 23, 2020. + LastMessageArrivalTime *time.Time `locationName:"lastMessageArrivalTime" type:"timestamp"` + // When the channel was last updated. LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` @@ -3764,9 +3771,9 @@ type Channel struct { // The status of the channel. Status *string `locationName:"status" type:"string" enum:"ChannelStatus"` - // Where channel data is stored. You may choose one of "serviceManagedS3" or - // "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". - // This cannot be changed after creation of the channel. + // Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 + // storage. If not specified, the default is serviceManagedS3. You cannot change + // this storage option after the channel is created. Storage *ChannelStorage `locationName:"storage" type:"structure"` } @@ -3792,6 +3799,12 @@ func (s *Channel) SetCreationTime(v time.Time) *Channel { return s } +// SetLastMessageArrivalTime sets the LastMessageArrivalTime field's value. 
+func (s *Channel) SetLastMessageArrivalTime(v time.Time) *Channel { + s.LastMessageArrivalTime = &v + return s +} + // SetLastUpdateTime sets the LastUpdateTime field's value. func (s *Channel) SetLastUpdateTime(v time.Time) *Channel { s.LastUpdateTime = &v @@ -3831,7 +3844,7 @@ type ChannelActivity struct { // ChannelName is a required field ChannelName *string `locationName:"channelName" min:"1" type:"string" required:"true"` - // The name of the 'channel' activity. + // The name of the channel activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -3917,21 +3930,21 @@ func (s *ChannelStatistics) SetSize(v *EstimatedResourceSize) *ChannelStatistics return s } -// Where channel data is stored. You may choose one of "serviceManagedS3" or -// "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". -// This cannot be changed after creation of the channel. +// Where channel data is stored. You may choose one of serviceManagedS3 or customerManagedS3 +// storage. If not specified, the default is serviceManagedS3. This cannot be +// changed after creation of the channel. type ChannelStorage struct { _ struct{} `type:"structure"` // Use this to store channel data in an S3 bucket that you manage. If customer - // managed storage is selected, the "retentionPeriod" parameter is ignored. - // The choice of service-managed or customer-managed S3 storage cannot be changed - // after creation of the channel. + // managed storage is selected, the retentionPeriod parameter is ignored. You + // cannot change the choice of service-managed or customer-managed S3 storage + // after the channel is created. CustomerManagedS3 *CustomerManagedChannelS3Storage `locationName:"customerManagedS3" type:"structure"` - // Use this to store channel data in an S3 bucket managed by the AWS IoT Analytics - // service. The choice of service-managed or customer-managed S3 storage cannot - // be changed after creation of the channel. + // Use this to store channel data in an S3 bucket managed by AWS IoT Analytics. + // You cannot change the choice of service-managed or customer-managed S3 storage + // after the channel is created. ServiceManagedS3 *ServiceManagedChannelS3Storage `locationName:"serviceManagedS3" type:"structure"` } @@ -3979,8 +3992,7 @@ type ChannelStorageSummary struct { // Used to store channel data in an S3 bucket that you manage. CustomerManagedS3 *CustomerManagedChannelS3StorageSummary `locationName:"customerManagedS3" type:"structure"` - // Used to store channel data in an S3 bucket managed by the AWS IoT Analytics - // service. + // Used to store channel data in an S3 bucket managed by AWS IoT Analytics. ServiceManagedS3 *ServiceManagedChannelS3StorageSummary `locationName:"serviceManagedS3" type:"structure"` } @@ -4019,6 +4031,15 @@ type ChannelSummary struct { // When the channel was created. CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + // The last time when a new message arrived in the channel. + // + // AWS IoT Analytics updates this value at most once per minute for one channel. + // Hence, the lastMessageArrivalTime value is an approximation. + // + // This feature only applies to messages that arrived in the data store after + // October 23, 2020. + LastMessageArrivalTime *time.Time `locationName:"lastMessageArrivalTime" type:"timestamp"` + // The last time the channel was updated. 
LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` @@ -4054,6 +4075,12 @@ func (s *ChannelSummary) SetCreationTime(v time.Time) *ChannelSummary { return s } +// SetLastMessageArrivalTime sets the LastMessageArrivalTime field's value. +func (s *ChannelSummary) SetLastMessageArrivalTime(v time.Time) *ChannelSummary { + s.LastMessageArrivalTime = &v + return s +} + // SetLastUpdateTime sets the LastUpdateTime field's value. func (s *ChannelSummary) SetLastUpdateTime(v time.Time) *ChannelSummary { s.LastUpdateTime = &v @@ -4066,34 +4093,34 @@ func (s *ChannelSummary) SetStatus(v string) *ChannelSummary { return s } -// Information needed to run the "containerAction" to produce data set contents. +// Information required to run the containerAction to produce dataset contents. type ContainerDatasetAction struct { _ struct{} `type:"structure"` - // The ARN of the role which gives permission to the system to access needed - // resources in order to run the "containerAction". This includes, at minimum, - // permission to retrieve the data set contents which are the input to the containerized + // The ARN of the role that gives permission to the system to access required + // resources to run the containerAction. This includes, at minimum, permission + // to retrieve the dataset contents that are the input to the containerized // application. // // ExecutionRoleArn is a required field ExecutionRoleArn *string `locationName:"executionRoleArn" min:"20" type:"string" required:"true"` // The ARN of the Docker container stored in your account. The Docker container - // contains an application and needed support libraries and is used to generate - // data set contents. + // contains an application and required support libraries and is used to generate + // dataset contents. // // Image is a required field Image *string `locationName:"image" type:"string" required:"true"` - // Configuration of the resource which executes the "containerAction". + // Configuration of the resource that executes the containerAction. // // ResourceConfiguration is a required field ResourceConfiguration *ResourceConfiguration `locationName:"resourceConfiguration" type:"structure" required:"true"` - // The values of variables used within the context of the execution of the containerized + // The values of variables used in the context of the execution of the containerized // application (basically, parameters passed to the application). Each variable - // must have a name and a value given by one of "stringValue", "datasetContentVersionValue", - // or "outputFileUriValue". + // must have a name and a value given by one of stringValue, datasetContentVersionValue, + // or outputFileUriValue. Variables []*Variable `locationName:"variables" type:"list"` } @@ -4176,12 +4203,12 @@ type CreateChannelInput struct { // ChannelName is a required field ChannelName *string `locationName:"channelName" min:"1" type:"string" required:"true"` - // Where channel data is stored. You may choose one of "serviceManagedS3" or - // "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". - // This cannot be changed after creation of the channel. + // Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 + // storage. If not specified, the default is serviceManagedS3. You cannot change + // this storage option after the channel is created. 
ChannelStorage *ChannelStorage `locationName:"channelStorage" type:"structure"` - // How long, in days, message data is kept for the channel. When "customerManagedS3" + // How long, in days, message data is kept for the channel. When customerManagedS3 // storage is selected, this parameter is ignored. RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` @@ -4306,10 +4333,15 @@ func (s *CreateChannelOutput) SetRetentionPeriod(v *RetentionPeriod) *CreateChan type CreateDatasetContentInput struct { _ struct{} `type:"structure"` - // The name of the data set. + // The name of the dataset. // // DatasetName is a required field DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` + + // The version ID of the dataset content. To specify versionId for a dataset + // content, the dataset must use a DeltaTimer (https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html) + // filter. + VersionId *string `locationName:"versionId" min:"7" type:"string"` } // String returns the string representation @@ -4331,6 +4363,9 @@ func (s *CreateDatasetContentInput) Validate() error { if s.DatasetName != nil && len(*s.DatasetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) } + if s.VersionId != nil && len(*s.VersionId) < 7 { + invalidParams.Add(request.NewErrParamMinLen("VersionId", 7)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4344,10 +4379,16 @@ func (s *CreateDatasetContentInput) SetDatasetName(v string) *CreateDatasetConte return s } +// SetVersionId sets the VersionId field's value. +func (s *CreateDatasetContentInput) SetVersionId(v string) *CreateDatasetContentInput { + s.VersionId = &v + return s +} + type CreateDatasetContentOutput struct { _ struct{} `type:"structure"` - // The version ID of the data set contents which are being created. + // The version ID of the dataset contents that are being created. VersionId *string `locationName:"versionId" min:"7" type:"string"` } @@ -4375,7 +4416,7 @@ type CreateDatasetInput struct { // Actions is a required field Actions []*DatasetAction `locationName:"actions" min:"1" type:"list" required:"true"` - // When data set contents are created they are delivered to destinations specified + // When dataset contents are created, they are delivered to destinations specified // here. ContentDeliveryRules []*DatasetContentDeliveryRule `locationName:"contentDeliveryRules" type:"list"` @@ -4384,11 +4425,19 @@ type CreateDatasetInput struct { // DatasetName is a required field DatasetName *string `locationName:"datasetName" min:"1" type:"string" required:"true"` - // [Optional] How long, in days, versions of data set contents are kept for - // the data set. If not specified or set to null, versions of data set contents - // are retained for at most 90 days. The number of versions of data set contents - // retained is determined by the versioningConfiguration parameter. (For more - // information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + // A list of data rules that send notifications to Amazon CloudWatch, when data + // arrives late. To specify lateDataRules, the dataset must use a DeltaTimer + // (https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html) + // filter. + LateDataRules []*LateDataRule `locationName:"lateDataRules" min:"1" type:"list"` + + // Optional. 
How long, in days, versions of dataset contents are kept for the + // dataset. If not specified or set to null, versions of dataset contents are + // retained for at most 90 days. The number of versions of dataset contents + // retained is determined by the versioningConfiguration parameter. For more + // information, see Keeping Multiple Versions of AWS IoT Analytics Data Sets + // (https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + // in the AWS IoT Analytics User Guide. RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` // Metadata which can be used to manage the data set. @@ -4399,10 +4448,12 @@ type CreateDatasetInput struct { // The list of triggers can be empty or contain up to five DataSetTrigger objects. Triggers []*DatasetTrigger `locationName:"triggers" type:"list"` - // [Optional] How many versions of data set contents are kept. If not specified + // Optional. How many versions of dataset contents are kept. If not specified // or set to null, only the latest version plus the latest succeeded version - // (if they are different) are kept for the time period specified by the "retentionPeriod" - // parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + // (if they are different) are kept for the time period specified by the retentionPeriod + // parameter. For more information, see Keeping Multiple Versions of AWS IoT + // Analytics Data Sets (https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + // in the AWS IoT Analytics User Guide. VersioningConfiguration *VersioningConfiguration `locationName:"versioningConfiguration" type:"structure"` } @@ -4431,6 +4482,9 @@ func (s *CreateDatasetInput) Validate() error { if s.DatasetName != nil && len(*s.DatasetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) } + if s.LateDataRules != nil && len(s.LateDataRules) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LateDataRules", 1)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } @@ -4454,6 +4508,16 @@ func (s *CreateDatasetInput) Validate() error { } } } + if s.LateDataRules != nil { + for i, v := range s.LateDataRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LateDataRules", i), err.(request.ErrInvalidParams)) + } + } + } if s.RetentionPeriod != nil { if err := s.RetentionPeriod.Validate(); err != nil { invalidParams.AddNested("RetentionPeriod", err.(request.ErrInvalidParams)) @@ -4509,6 +4573,12 @@ func (s *CreateDatasetInput) SetDatasetName(v string) *CreateDatasetInput { return s } +// SetLateDataRules sets the LateDataRules field's value. +func (s *CreateDatasetInput) SetLateDataRules(v []*LateDataRule) *CreateDatasetInput { + s.LateDataRules = v + return s +} + // SetRetentionPeriod sets the RetentionPeriod field's value. func (s *CreateDatasetInput) SetRetentionPeriod(v *RetentionPeriod) *CreateDatasetInput { s.RetentionPeriod = v @@ -4536,13 +4606,13 @@ func (s *CreateDatasetInput) SetVersioningConfiguration(v *VersioningConfigurati type CreateDatasetOutput struct { _ struct{} `type:"structure"` - // The ARN of the data set. + // The ARN of the dataset. DatasetArn *string `locationName:"datasetArn" type:"string"` - // The name of the data set. 
+ // The name of the dataset. DatasetName *string `locationName:"datasetName" min:"1" type:"string"` - // How long, in days, data set contents are kept for the data set. + // How long, in days, dataset contents are kept for the dataset. RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` } @@ -4582,12 +4652,12 @@ type CreateDatastoreInput struct { // DatastoreName is a required field DatastoreName *string `locationName:"datastoreName" min:"1" type:"string" required:"true"` - // Where data store data is stored. You may choose one of "serviceManagedS3" - // or "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". - // This cannot be changed after the data store is created. + // Where data store data is stored. You can choose one of serviceManagedS3 or + // customerManagedS3 storage. If not specified, the default is serviceManagedS3. + // You cannot change this storage option after the data store is created. DatastoreStorage *DatastoreStorage `locationName:"datastoreStorage" type:"structure"` - // How long, in days, message data is kept for the data store. When "customerManagedS3" + // How long, in days, message data is kept for the data store. When customerManagedS3 // storage is selected, this parameter is ignored. RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` @@ -4712,15 +4782,15 @@ func (s *CreateDatastoreOutput) SetRetentionPeriod(v *RetentionPeriod) *CreateDa type CreatePipelineInput struct { _ struct{} `type:"structure"` - // A list of "PipelineActivity" objects. Activities perform transformations - // on your messages, such as removing, renaming or adding message attributes; - // filtering messages based on attribute values; invoking your Lambda functions - // on messages for advanced processing; or performing mathematical transformations - // to normalize device data. + // A list of PipelineActivity objects. Activities perform transformations on + // your messages, such as removing, renaming or adding message attributes; filtering + // messages based on attribute values; invoking your Lambda functions on messages + // for advanced processing; or performing mathematical transformations to normalize + // device data. // // The list can be 2-25 PipelineActivity objects and must contain both a channel - // and a datastore activity. Each entry in the list must contain only one activity, - // for example: + // and a datastore activity. Each entry in the list must contain only one activity. + // For example: // // pipelineActivities = [ { "channel": { ... } }, { "lambda": { ... } }, ... // ] @@ -4843,24 +4913,24 @@ func (s *CreatePipelineOutput) SetPipelineName(v string) *CreatePipelineOutput { } // Use this to store channel data in an S3 bucket that you manage. If customer -// managed storage is selected, the "retentionPeriod" parameter is ignored. -// The choice of service-managed or customer-managed S3 storage cannot be changed -// after creation of the channel. +// managed storage is selected, the retentionPeriod parameter is ignored. You +// cannot change the choice of service-managed or customer-managed S3 storage +// after the channel is created. type CustomerManagedChannelS3Storage struct { _ struct{} `type:"structure"` - // The name of the Amazon S3 bucket in which channel data is stored. + // The name of the S3 bucket in which channel data is stored. 
// // Bucket is a required field Bucket *string `locationName:"bucket" min:"3" type:"string" required:"true"` - // [Optional] The prefix used to create the keys of the channel data objects. - // Each object in an Amazon S3 bucket has a key that is its unique identifier - // within the bucket (each object in a bucket has exactly one key). The prefix - // must end with a '/'. + // Optional. The prefix used to create the keys of the channel data objects. + // Each object in an S3 bucket has a key that is its unique identifier in the + // bucket. Each object in a bucket has exactly one key. The prefix must end + // with a forward slash (/). KeyPrefix *string `locationName:"keyPrefix" min:"1" type:"string"` - // The ARN of the role which grants AWS IoT Analytics permission to interact + // The ARN of the role that grants AWS IoT Analytics permission to interact // with your Amazon S3 resources. // // RoleArn is a required field @@ -4924,16 +4994,16 @@ func (s *CustomerManagedChannelS3Storage) SetRoleArn(v string) *CustomerManagedC type CustomerManagedChannelS3StorageSummary struct { _ struct{} `type:"structure"` - // The name of the Amazon S3 bucket in which channel data is stored. + // The name of the S3 bucket in which channel data is stored. Bucket *string `locationName:"bucket" min:"3" type:"string"` - // [Optional] The prefix used to create the keys of the channel data objects. - // Each object in an Amazon S3 bucket has a key that is its unique identifier - // within the bucket (each object in a bucket has exactly one key). The prefix - // must end with a '/'. + // Optional. The prefix used to create the keys of the channel data objects. + // Each object in an S3 bucket has a key that is its unique identifier within + // the bucket (each object in a bucket has exactly one key). The prefix must + // end with a forward slash (/). KeyPrefix *string `locationName:"keyPrefix" min:"1" type:"string"` - // The ARN of the role which grants AWS IoT Analytics permission to interact + // The ARN of the role that grants AWS IoT Analytics permission to interact // with your Amazon S3 resources. RoleArn *string `locationName:"roleArn" min:"20" type:"string"` } @@ -4966,25 +5036,25 @@ func (s *CustomerManagedChannelS3StorageSummary) SetRoleArn(v string) *CustomerM return s } -// Use this to store data store data in an S3 bucket that you manage. When customer -// managed storage is selected, the "retentionPeriod" parameter is ignored. -// The choice of service-managed or customer-managed S3 storage cannot be changed -// after creation of the data store. +// Use this to store data store data in an S3 bucket that you manage. When customer-managed +// storage is selected, the retentionPeriod parameter is ignored. You cannot +// change the choice of service-managed or customer-managed S3 storage after +// the data store is created. type CustomerManagedDatastoreS3Storage struct { _ struct{} `type:"structure"` - // The name of the Amazon S3 bucket in which data store data is stored. + // The name of the S3 bucket in which data store data is stored. // // Bucket is a required field Bucket *string `locationName:"bucket" min:"3" type:"string" required:"true"` - // [Optional] The prefix used to create the keys of the data store data objects. - // Each object in an Amazon S3 bucket has a key that is its unique identifier - // within the bucket (each object in a bucket has exactly one key). The prefix - // must end with a '/'. + // Optional. The prefix used to create the keys of the data store data objects. 
+ // Each object in an S3 bucket has a key that is its unique identifier in the + // bucket. Each object in a bucket has exactly one key. The prefix must end + // with a forward slash (/). KeyPrefix *string `locationName:"keyPrefix" min:"1" type:"string"` - // The ARN of the role which grants AWS IoT Analytics permission to interact + // The ARN of the role that grants AWS IoT Analytics permission to interact // with your Amazon S3 resources. // // RoleArn is a required field @@ -5048,16 +5118,16 @@ func (s *CustomerManagedDatastoreS3Storage) SetRoleArn(v string) *CustomerManage type CustomerManagedDatastoreS3StorageSummary struct { _ struct{} `type:"structure"` - // The name of the Amazon S3 bucket in which data store data is stored. + // The name of the S3 bucket in which data store data is stored. Bucket *string `locationName:"bucket" min:"3" type:"string"` - // [Optional] The prefix used to create the keys of the data store data objects. - // Each object in an Amazon S3 bucket has a key that is its unique identifier - // within the bucket (each object in a bucket has exactly one key). The prefix - // must end with a '/'. + // Optional. The prefix used to create the keys of the data store data objects. + // Each object in an S3 bucket has a key that is its unique identifier in the + // bucket. Each object in a bucket has exactly one key. The prefix must end + // with a forward slash (/). KeyPrefix *string `locationName:"keyPrefix" min:"1" type:"string"` - // The ARN of the role which grants AWS IoT Analytics permission to interact + // The ARN of the role that grants AWS IoT Analytics permission to interact // with your Amazon S3 resources. RoleArn *string `locationName:"roleArn" min:"20" type:"string"` } @@ -5094,13 +5164,13 @@ func (s *CustomerManagedDatastoreS3StorageSummary) SetRoleArn(v string) *Custome type Dataset struct { _ struct{} `type:"structure"` - // The "DatasetAction" objects that automatically create the data set contents. + // The DatasetAction objects that automatically create the data set contents. Actions []*DatasetAction `locationName:"actions" min:"1" type:"list"` // The ARN of the data set. Arn *string `locationName:"arn" type:"string"` - // When data set contents are created they are delivered to destinations specified + // When dataset contents are created they are delivered to destinations specified // here. ContentDeliveryRules []*DatasetContentDeliveryRule `locationName:"contentDeliveryRules" type:"list"` @@ -5110,23 +5180,31 @@ type Dataset struct { // The last time the data set was updated. LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` + // A list of data rules that send notifications to Amazon CloudWatch, when data + // arrives late. To specify lateDataRules, the dataset must use a DeltaTimer + // (https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html) + // filter. + LateDataRules []*LateDataRule `locationName:"lateDataRules" min:"1" type:"list"` + // The name of the data set. Name *string `locationName:"name" min:"1" type:"string"` - // [Optional] How long, in days, message data is kept for the data set. + // Optional. How long, in days, message data is kept for the data set. RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` // The status of the data set. 
Status *string `locationName:"status" type:"string" enum:"DatasetStatus"` - // The "DatasetTrigger" objects that specify when the data set is automatically + // The DatasetTrigger objects that specify when the data set is automatically // updated. Triggers []*DatasetTrigger `locationName:"triggers" type:"list"` - // [Optional] How many versions of data set contents are kept. If not specified + // Optional. How many versions of dataset contents are kept. If not specified // or set to null, only the latest version plus the latest succeeded version - // (if they are different) are kept for the time period specified by the "retentionPeriod" - // parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + // (if they are different) are kept for the time period specified by the retentionPeriod + // parameter. For more information, see Keeping Multiple Versions of AWS IoT + // Analytics Data Sets (https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + // in the AWS IoT Analytics User Guide. VersioningConfiguration *VersioningConfiguration `locationName:"versioningConfiguration" type:"structure"` } @@ -5170,6 +5248,12 @@ func (s *Dataset) SetLastUpdateTime(v time.Time) *Dataset { return s } +// SetLateDataRules sets the LateDataRules field's value. +func (s *Dataset) SetLateDataRules(v []*LateDataRule) *Dataset { + s.LateDataRules = v + return s +} + // SetName sets the Name field's value. func (s *Dataset) SetName(v string) *Dataset { s.Name = &v @@ -5200,7 +5284,7 @@ func (s *Dataset) SetVersioningConfiguration(v *VersioningConfiguration) *Datase return s } -// A "DatasetAction" object that specifies how data set contents are automatically +// A DatasetAction object that specifies how data set contents are automatically // created. type DatasetAction struct { _ struct{} `type:"structure"` @@ -5209,13 +5293,13 @@ type DatasetAction struct { // created. ActionName *string `locationName:"actionName" min:"1" type:"string"` - // Information which allows the system to run a containerized application in - // order to create the data set contents. The application must be in a Docker - // container along with any needed support libraries. + // Information that allows the system to run a containerized application to + // create the dataset contents. The application must be in a Docker container + // along with any required support libraries. ContainerAction *ContainerDatasetAction `locationName:"containerAction" type:"structure"` - // An "SqlQueryDatasetAction" object that uses an SQL query to automatically - // create data set contents. + // An SqlQueryDatasetAction object that uses an SQL query to automatically create + // data set contents. QueryAction *SqlQueryDatasetAction `locationName:"queryAction" type:"structure"` } @@ -5270,14 +5354,14 @@ func (s *DatasetAction) SetQueryAction(v *SqlQueryDatasetAction) *DatasetAction return s } -// Information about the action which automatically creates the data set's contents. +// Information about the action that automatically creates the dataset's contents. type DatasetActionSummary struct { _ struct{} `type:"structure"` - // The name of the action which automatically creates the data set's contents. + // The name of the action that automatically creates the dataset's contents. 
ActionName *string `locationName:"actionName" min:"1" type:"string"` - // The type of action by which the data set's contents are automatically created. + // The type of action by which the dataset's contents are automatically created. ActionType *string `locationName:"actionType" type:"string" enum:"DatasetActionType"` } @@ -5303,14 +5387,14 @@ func (s *DatasetActionSummary) SetActionType(v string) *DatasetActionSummary { return s } -// The destination to which data set contents are delivered. +// The destination to which dataset contents are delivered. type DatasetContentDeliveryDestination struct { _ struct{} `type:"structure"` - // Configuration information for delivery of data set contents to AWS IoT Events. + // Configuration information for delivery of dataset contents to AWS IoT Events. IotEventsDestinationConfiguration *IotEventsDestinationConfiguration `locationName:"iotEventsDestinationConfiguration" type:"structure"` - // Configuration information for delivery of data set contents to Amazon S3. + // Configuration information for delivery of dataset contents to Amazon S3. S3DestinationConfiguration *S3DestinationConfiguration `locationName:"s3DestinationConfiguration" type:"structure"` } @@ -5356,17 +5440,17 @@ func (s *DatasetContentDeliveryDestination) SetS3DestinationConfiguration(v *S3D return s } -// When data set contents are created they are delivered to destination specified +// When dataset contents are created, they are delivered to destination specified // here. type DatasetContentDeliveryRule struct { _ struct{} `type:"structure"` - // The destination to which data set contents are delivered. + // The destination to which dataset contents are delivered. // // Destination is a required field Destination *DatasetContentDeliveryDestination `locationName:"destination" type:"structure" required:"true"` - // The name of the data set content delivery rules entry. + // The name of the dataset content delivery rules entry. EntryName *string `locationName:"entryName" type:"string"` } @@ -5417,8 +5501,8 @@ type DatasetContentStatus struct { // The reason the data set contents are in this state. Reason *string `locationName:"reason" type:"string"` - // The state of the data set contents. Can be one of "READY", "CREATING", "SUCCEEDED" - // or "FAILED". + // The state of the data set contents. Can be one of READY, CREATING, SUCCEEDED, + // or FAILED. State *string `locationName:"state" type:"string" enum:"DatasetContentState"` } @@ -5444,23 +5528,23 @@ func (s *DatasetContentStatus) SetState(v string) *DatasetContentStatus { return s } -// Summary information about data set contents. +// Summary information about dataset contents. type DatasetContentSummary struct { _ struct{} `type:"structure"` // The time the dataset content status was updated to SUCCEEDED or FAILED. CompletionTime *time.Time `locationName:"completionTime" type:"timestamp"` - // The actual time the creation of the data set contents was started. + // The actual time the creation of the dataset contents was started. CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` - // The time the creation of the data set contents was scheduled to start. + // The time the creation of the dataset contents was scheduled to start. ScheduleTime *time.Time `locationName:"scheduleTime" type:"timestamp"` // The status of the data set contents. Status *DatasetContentStatus `locationName:"status" type:"structure"` - // The version of the data set contents. + // The version of the dataset contents. 
Version *string `locationName:"version" min:"7" type:"string"` } @@ -5504,11 +5588,11 @@ func (s *DatasetContentSummary) SetVersion(v string) *DatasetContentSummary { return s } -// The data set whose latest contents are used as input to the notebook or application. +// The dataset whose latest contents are used as input to the notebook or application. type DatasetContentVersionValue struct { _ struct{} `type:"structure"` - // The name of the data set whose latest contents are used as input to the notebook + // The name of the dataset whose latest contents are used as input to the notebook // or application. // // DatasetName is a required field @@ -5551,7 +5635,7 @@ func (s *DatasetContentVersionValue) SetDatasetName(v string) *DatasetContentVer type DatasetEntry struct { _ struct{} `type:"structure"` - // The pre-signed URI of the data set item. + // The presigned URI of the data set item. DataURI *string `locationName:"dataURI" type:"string"` // The name of the data set item. @@ -5584,7 +5668,7 @@ func (s *DatasetEntry) SetEntryName(v string) *DatasetEntry { type DatasetSummary struct { _ struct{} `type:"structure"` - // A list of "DataActionSummary" objects. + // A list of DataActionSummary objects. Actions []*DatasetActionSummary `locationName:"actions" min:"1" type:"list"` // The time the data set was created. @@ -5651,7 +5735,7 @@ func (s *DatasetSummary) SetTriggers(v []*DatasetTrigger) *DatasetSummary { return s } -// The "DatasetTrigger" that specifies when the data set is automatically updated. +// The DatasetTrigger that specifies when the data set is automatically updated. type DatasetTrigger struct { _ struct{} `type:"structure"` @@ -5659,7 +5743,7 @@ type DatasetTrigger struct { // contents. Dataset *TriggeringDataset `locationName:"dataset" type:"structure"` - // The "Schedule" when the trigger is initiated. + // The Schedule when the trigger is initiated. Schedule *Schedule `locationName:"schedule" type:"structure"` } @@ -5710,13 +5794,22 @@ type Datastore struct { // When the data store was created. CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` + // The last time when a new message arrived in the data store. + // + // AWS IoT Analytics updates this value at most once per minute for one data + // store. Hence, the lastMessageArrivalTime value is an approximation. + // + // This feature only applies to messages that arrived in the data store after + // October 23, 2020. + LastMessageArrivalTime *time.Time `locationName:"lastMessageArrivalTime" type:"timestamp"` + // The last time the data store was updated. LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` // The name of the data store. Name *string `locationName:"name" min:"1" type:"string"` - // How long, in days, message data is kept for the data store. When "customerManagedS3" + // How long, in days, message data is kept for the data store. When customerManagedS3 // storage is selected, this parameter is ignored. RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` @@ -5735,9 +5828,9 @@ type Datastore struct { // The data store is being deleted. Status *string `locationName:"status" type:"string" enum:"DatastoreStatus"` - // Where data store data is stored. You may choose one of "serviceManagedS3" - // or "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". - // This cannot be changed after the data store is created. + // Where data store data is stored. 
You can choose one of serviceManagedS3 or + // customerManagedS3 storage. If not specified, the default is serviceManagedS3. + // You cannot change this storage option after the data store is created. Storage *DatastoreStorage `locationName:"storage" type:"structure"` } @@ -5763,6 +5856,12 @@ func (s *Datastore) SetCreationTime(v time.Time) *Datastore { return s } +// SetLastMessageArrivalTime sets the LastMessageArrivalTime field's value. +func (s *Datastore) SetLastMessageArrivalTime(v time.Time) *Datastore { + s.LastMessageArrivalTime = &v + return s +} + // SetLastUpdateTime sets the LastUpdateTime field's value. func (s *Datastore) SetLastUpdateTime(v time.Time) *Datastore { s.LastUpdateTime = &v @@ -5793,7 +5892,7 @@ func (s *Datastore) SetStorage(v *DatastoreStorage) *Datastore { return s } -// The 'datastore' activity that specifies where to store the processed data. +// The datastore activity that specifies where to store the processed data. type DatastoreActivity struct { _ struct{} `type:"structure"` @@ -5802,7 +5901,7 @@ type DatastoreActivity struct { // DatastoreName is a required field DatastoreName *string `locationName:"datastoreName" min:"1" type:"string" required:"true"` - // The name of the 'datastore' activity. + // The name of the datastore activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -5876,21 +5975,21 @@ func (s *DatastoreStatistics) SetSize(v *EstimatedResourceSize) *DatastoreStatis return s } -// Where data store data is stored. You may choose one of "serviceManagedS3" -// or "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". -// This cannot be changed after the data store is created. +// Where data store data is stored. You can choose one of serviceManagedS3 or +// customerManagedS3 storage. If not specified, the default is serviceManagedS3. +// You cannot change this storage option after the data store is created. type DatastoreStorage struct { _ struct{} `type:"structure"` // Use this to store data store data in an S3 bucket that you manage. When customer - // managed storage is selected, the "retentionPeriod" parameter is ignored. - // The choice of service-managed or customer-managed S3 storage cannot be changed + // managed storage is selected, the retentionPeriod parameter is ignored. The + // choice of service-managed or customer-managed S3 storage cannot be changed // after creation of the data store. CustomerManagedS3 *CustomerManagedDatastoreS3Storage `locationName:"customerManagedS3" type:"structure"` - // Use this to store data store data in an S3 bucket managed by the AWS IoT - // Analytics service. The choice of service-managed or customer-managed S3 storage - // cannot be changed after creation of the data store. + // Use this to store data store data in an S3 bucket managed by AWS IoT Analytics. + // You cannot change the choice of service-managed or customer-managed S3 storage + // after the data store is created. ServiceManagedS3 *ServiceManagedDatastoreS3Storage `locationName:"serviceManagedS3" type:"structure"` } @@ -5938,8 +6037,7 @@ type DatastoreStorageSummary struct { // Used to store data store data in an S3 bucket that you manage. CustomerManagedS3 *CustomerManagedDatastoreS3StorageSummary `locationName:"customerManagedS3" type:"structure"` - // Used to store data store data in an S3 bucket managed by the AWS IoT Analytics - // service. + // Used to store data store data in an S3 bucket managed by AWS IoT Analytics. 
ServiceManagedS3 *ServiceManagedDatastoreS3StorageSummary `locationName:"serviceManagedS3" type:"structure"` } @@ -5978,6 +6076,15 @@ type DatastoreSummary struct { // Where data store data is stored. DatastoreStorage *DatastoreStorageSummary `locationName:"datastoreStorage" type:"structure"` + // The last time when a new message arrived in the data store. + // + // AWS IoT Analytics updates this value at most once per minute for one data + // store. Hence, the lastMessageArrivalTime value is an approximation. + // + // This feature only applies to messages that arrived in the data store after + // October 23, 2020. + LastMessageArrivalTime *time.Time `locationName:"lastMessageArrivalTime" type:"timestamp"` + // The last time the data store was updated. LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` @@ -6013,6 +6120,12 @@ func (s *DatastoreSummary) SetDatastoreStorage(v *DatastoreStorageSummary) *Data return s } +// SetLastMessageArrivalTime sets the LastMessageArrivalTime field's value. +func (s *DatastoreSummary) SetLastMessageArrivalTime(v time.Time) *DatastoreSummary { + s.LastMessageArrivalTime = &v + return s +} + // SetLastUpdateTime sets the LastUpdateTime field's value. func (s *DatastoreSummary) SetLastUpdateTime(v time.Time) *DatastoreSummary { s.LastUpdateTime = &v @@ -6083,12 +6196,12 @@ func (s DeleteChannelOutput) GoString() string { type DeleteDatasetContentInput struct { _ struct{} `type:"structure"` - // The name of the data set whose content is deleted. + // The name of the dataset whose content is deleted. // // DatasetName is a required field DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` - // The version of the data set whose content is deleted. You can also use the + // The version of the dataset whose content is deleted. You can also use the // strings "$LATEST" or "$LATEST_SUCCEEDED" to delete the latest or latest successfully // completed data set. If not specified, "$LATEST_SUCCEEDED" is the default. VersionId *string `location:"querystring" locationName:"versionId" min:"7" type:"string"` @@ -6319,22 +6432,21 @@ func (s DeletePipelineOutput) GoString() string { type DeltaTime struct { _ struct{} `type:"structure"` - // The number of seconds of estimated "in flight" lag time of message data. - // When you create data set contents using message data from a specified time - // frame, some message data may still be "in flight" when processing begins, - // and so will not arrive in time to be processed. Use this field to make allowances - // for the "in flight" time of your message data, so that data not processed - // from a previous time frame will be included with the next time frame. Without - // this, missed message data would be excluded from processing during the next - // time frame as well, because its timestamp places it within the previous time - // frame. + // The number of seconds of estimated in-flight lag time of message data. When + // you create dataset contents using message data from a specified timeframe, + // some message data might still be in flight when processing begins, and so + // do not arrive in time to be processed. Use this field to make allowances + // for the in flight time of your message data, so that data not processed from + // a previous timeframe is included with the next timeframe. Otherwise, missed + // message data would be excluded from processing during the next timeframe + // too, because its timestamp places it within the previous timeframe. 
// // OffsetSeconds is a required field OffsetSeconds *int64 `locationName:"offsetSeconds" type:"integer" required:"true"` - // An expression by which the time of the message data may be determined. This - // may be the name of a timestamp field, or a SQL expression which is used to - // derive the time the message data was generated. + // An expression by which the time of the message data might be determined. + // This can be the name of a timestamp field or a SQL expression that is used + // to derive the time the message data was generated. // // TimeExpression is a required field TimeExpression *string `locationName:"timeExpression" type:"string" required:"true"` @@ -6378,6 +6490,63 @@ func (s *DeltaTime) SetTimeExpression(v string) *DeltaTime { return s } +// A structure that contains the configuration information of a delta time session +// window. +// +// DeltaTime (https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html) +// specifies a time interval. You can use DeltaTime to create dataset contents +// with data that has arrived in the data store since the last execution. For +// an example of DeltaTime, see Creating a SQL dataset with a delta window +// (CLI) (https://docs.aws.amazon.com/iotanalytics/latest/userguide/automate-create-dataset.html#automate-example6) +// in the AWS IoT Analytics User Guide. +type DeltaTimeSessionWindowConfiguration struct { + _ struct{} `type:"structure"` + + // A time interval. You can use timeoutInMinutes so that AWS IoT Analytics can + // batch up late data notifications that have been generated since the last + // execution. AWS IoT Analytics sends one batch of notifications to Amazon CloudWatch + // Events at one time. + // + // For more information about how to write a timestamp expression, see Date + // and Time Functions and Operators (https://prestodb.io/docs/0.172/functions/datetime.html), + // in the Presto 0.172 Documentation. + // + // TimeoutInMinutes is a required field + TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s DeltaTimeSessionWindowConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeltaTimeSessionWindowConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeltaTimeSessionWindowConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeltaTimeSessionWindowConfiguration"} + if s.TimeoutInMinutes == nil { + invalidParams.Add(request.NewErrParamRequired("TimeoutInMinutes")) + } + if s.TimeoutInMinutes != nil && *s.TimeoutInMinutes < 1 { + invalidParams.Add(request.NewErrParamMinValue("TimeoutInMinutes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTimeoutInMinutes sets the TimeoutInMinutes field's value. +func (s *DeltaTimeSessionWindowConfiguration) SetTimeoutInMinutes(v int64) *DeltaTimeSessionWindowConfiguration { + s.TimeoutInMinutes = &v + return s +} + type DescribeChannelInput struct { _ struct{} `type:"structure"` @@ -6436,7 +6605,7 @@ type DescribeChannelOutput struct { // An object that contains information about the channel. Channel *Channel `locationName:"channel" type:"structure"` - // Statistics about the channel. Included if the 'includeStatistics' parameter + // Statistics about the channel. 
Included if the includeStatistics parameter // is set to true in the request. Statistics *ChannelStatistics `locationName:"statistics" type:"structure"` } @@ -6586,7 +6755,7 @@ type DescribeDatastoreOutput struct { Datastore *Datastore `locationName:"datastore" type:"structure"` // Additional statistical information about the data store. Included if the - // 'includeStatistics' parameter is set to true in the request. + // includeStatistics parameter is set to true in the request. Statistics *DatastoreStatistics `locationName:"statistics" type:"structure"` } @@ -6693,7 +6862,7 @@ func (s *DescribePipelineInput) SetPipelineName(v string) *DescribePipelineInput type DescribePipelineOutput struct { _ struct{} `type:"structure"` - // A "Pipeline" object that contains information about the pipeline. + // A Pipeline object that contains information about the pipeline. Pipeline *Pipeline `locationName:"pipeline" type:"structure"` } @@ -6722,7 +6891,7 @@ type DeviceRegistryEnrichActivity struct { // Attribute is a required field Attribute *string `locationName:"attribute" min:"1" type:"string" required:"true"` - // The name of the 'deviceRegistryEnrich' activity. + // The name of the deviceRegistryEnrich activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -6818,7 +6987,7 @@ func (s *DeviceRegistryEnrichActivity) SetThingName(v string) *DeviceRegistryEnr return s } -// An activity that adds information from the AWS IoT Device Shadows service +// An activity that adds information from the AWS IoT Device Shadow service // to a message. type DeviceShadowEnrichActivity struct { _ struct{} `type:"structure"` @@ -6828,7 +6997,7 @@ type DeviceShadowEnrichActivity struct { // Attribute is a required field Attribute *string `locationName:"attribute" min:"1" type:"string" required:"true"` - // The name of the 'deviceShadowEnrich' activity. + // The name of the deviceShadowEnrich activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -6931,7 +7100,7 @@ type EstimatedResourceSize struct { // The time when the estimate of the size of the resource was made. EstimatedOn *time.Time `locationName:"estimatedOn" type:"timestamp"` - // The estimated size of the resource in bytes. + // The estimated size of the resource, in bytes. EstimatedSizeInBytes *float64 `locationName:"estimatedSizeInBytes" type:"double"` } @@ -6962,12 +7131,12 @@ type FilterActivity struct { _ struct{} `type:"structure"` // An expression that looks like a SQL WHERE clause that must return a Boolean - // value. + // value. Messages that satisfy the condition are passed to the next activity. // // Filter is a required field Filter *string `locationName:"filter" min:"1" type:"string" required:"true"` - // The name of the 'filter' activity. + // The name of the filter activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -7088,7 +7257,7 @@ func (s *GetDatasetContentInput) SetVersionId(v string) *GetDatasetContentInput type GetDatasetContentOutput struct { _ struct{} `type:"structure"` - // A list of "DatasetEntry" objects. + // A list of DatasetEntry objects. Entries []*DatasetEntry `locationName:"entries" type:"list"` // The status of the data set content. 
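Note on the hunks above: this revision adds a lastMessageArrivalTime field to Channel, ChannelSummary, Datastore, and DatastoreSummary. The following is a minimal sketch (Go, not part of the generated diff) of reading the new field through DescribeChannel; the channel name is a placeholder and credentials/region are assumed to come from the SDK's default resolution chain.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotanalytics"
)

func main() {
	// Default credential/region resolution is assumed.
	svc := iotanalytics.New(session.Must(session.NewSession()))

	out, err := svc.DescribeChannel(&iotanalytics.DescribeChannelInput{
		ChannelName: aws.String("my_channel"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}

	// lastMessageArrivalTime is an approximation: it is updated at most once
	// per minute per channel, and only reflects messages that arrived after
	// October 23, 2020.
	if t := out.Channel.LastMessageArrivalTime; t != nil {
		fmt.Println("last message arrived:", t.Format(time.RFC3339))
	}
}

ChannelSummary and DatastoreSummary gain the same field in this release, so the list operations can surface the value without a per-resource Describe call.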
@@ -7126,20 +7295,20 @@ func (s *GetDatasetContentOutput) SetTimestamp(v time.Time) *GetDatasetContentOu return s } -// Configuration information for coordination with the AWS Glue ETL (extract, -// transform and load) service. +// Configuration information for coordination with AWS Glue, a fully managed +// extract, transform and load (ETL) service. type GlueConfiguration struct { _ struct{} `type:"structure"` // The name of the database in your AWS Glue Data Catalog in which the table - // is located. (An AWS Glue Data Catalog database contains Glue Data tables.) + // is located. An AWS Glue Data Catalog database contains metadata tables. // // DatabaseName is a required field DatabaseName *string `locationName:"databaseName" min:"1" type:"string" required:"true"` - // The name of the table in your AWS Glue Data Catalog which is used to perform - // the ETL (extract, transform and load) operations. (An AWS Glue Data Catalog - // table contains partitioned data and descriptions of data sources and targets.) + // The name of the table in your AWS Glue Data Catalog that is used to perform + // the ETL operations. An AWS Glue Data Catalog table contains partitioned data + // and descriptions of data sources and targets. // // TableName is a required field TableName *string `locationName:"tableName" min:"1" type:"string" required:"true"` @@ -7301,17 +7470,17 @@ func (s *InvalidRequestException) RequestID() string { return s.RespMetadata.RequestID } -// Configuration information for delivery of data set contents to AWS IoT Events. +// Configuration information for delivery of dataset contents to AWS IoT Events. type IotEventsDestinationConfiguration struct { _ struct{} `type:"structure"` - // The name of the AWS IoT Events input to which data set contents are delivered. + // The name of the AWS IoT Events input to which dataset contents are delivered. // // InputName is a required field InputName *string `locationName:"inputName" min:"1" type:"string" required:"true"` - // The ARN of the role which grants AWS IoT Analytics permission to deliver - // data set contents to an AWS IoT Events input. + // The ARN of the role that grants AWS IoT Analytics permission to deliver dataset + // contents to an AWS IoT Events input. // // RoleArn is a required field RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` @@ -7367,7 +7536,7 @@ type LambdaActivity struct { // The number of messages passed to the Lambda function for processing. // - // The AWS Lambda function must be able to process all of these messages within + // The Lambda function must be able to process all of these messages within // five minutes, which is the maximum timeout duration for Lambda functions. // // BatchSize is a required field @@ -7378,7 +7547,7 @@ type LambdaActivity struct { // LambdaName is a required field LambdaName *string `locationName:"lambdaName" min:"1" type:"string" required:"true"` - // The name of the 'lambda' activity. + // The name of the lambda activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -7452,6 +7621,102 @@ func (s *LambdaActivity) SetNext(v string) *LambdaActivity { return s } +// A structure that contains the name and configuration information of a late +// data rule. +type LateDataRule struct { + _ struct{} `type:"structure"` + + // The information needed to configure the late data rule. 
+ // + // RuleConfiguration is a required field + RuleConfiguration *LateDataRuleConfiguration `locationName:"ruleConfiguration" type:"structure" required:"true"` + + // The name of the late data rule. + RuleName *string `locationName:"ruleName" min:"1" type:"string"` +} + +// String returns the string representation +func (s LateDataRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LateDataRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LateDataRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LateDataRule"} + if s.RuleConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RuleConfiguration")) + } + if s.RuleName != nil && len(*s.RuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleName", 1)) + } + if s.RuleConfiguration != nil { + if err := s.RuleConfiguration.Validate(); err != nil { + invalidParams.AddNested("RuleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleConfiguration sets the RuleConfiguration field's value. +func (s *LateDataRule) SetRuleConfiguration(v *LateDataRuleConfiguration) *LateDataRule { + s.RuleConfiguration = v + return s +} + +// SetRuleName sets the RuleName field's value. +func (s *LateDataRule) SetRuleName(v string) *LateDataRule { + s.RuleName = &v + return s +} + +// The information needed to configure a delta time session window. +type LateDataRuleConfiguration struct { + _ struct{} `type:"structure"` + + // The information needed to configure a delta time session window. + DeltaTimeSessionWindowConfiguration *DeltaTimeSessionWindowConfiguration `locationName:"deltaTimeSessionWindowConfiguration" type:"structure"` +} + +// String returns the string representation +func (s LateDataRuleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LateDataRuleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LateDataRuleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LateDataRuleConfiguration"} + if s.DeltaTimeSessionWindowConfiguration != nil { + if err := s.DeltaTimeSessionWindowConfiguration.Validate(); err != nil { + invalidParams.AddNested("DeltaTimeSessionWindowConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeltaTimeSessionWindowConfiguration sets the DeltaTimeSessionWindowConfiguration field's value. +func (s *LateDataRuleConfiguration) SetDeltaTimeSessionWindowConfiguration(v *DeltaTimeSessionWindowConfiguration) *LateDataRuleConfiguration { + s.DeltaTimeSessionWindowConfiguration = v + return s +} + // The command caused an internal limit to be exceeded. type LimitExceededException struct { _ struct{} `type:"structure"` @@ -7558,7 +7823,7 @@ func (s *ListChannelsInput) SetNextToken(v string) *ListChannelsInput { type ListChannelsOutput struct { _ struct{} `type:"structure"` - // A list of "ChannelSummary" objects. + // A list of ChannelSummary objects. 
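The new LateDataRule and LateDataRuleConfiguration types above carry the late-data notification settings added in this release. The following is a hedged sketch of attaching one to a dataset through UpdateDataset; the dataset name and query are hypothetical, and the DeltaTime and TimeoutInMinutes field names come from the wider SDK model rather than from this hunk.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iotanalytics"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := iotanalytics.New(sess)

        _, err := svc.UpdateDataset(&iotanalytics.UpdateDatasetInput{
            DatasetName: aws.String("mydataset"), // hypothetical dataset
            Actions: []*iotanalytics.DatasetAction{{
                ActionName: aws.String("query"),
                QueryAction: &iotanalytics.SqlQueryDatasetAction{
                    SqlQuery: aws.String("SELECT * FROM sensor_datastore"),
                    // lateDataRules requires the dataset to use a DeltaTime filter.
                    Filters: []*iotanalytics.QueryFilter{{
                        DeltaTime: &iotanalytics.DeltaTime{
                            OffsetSeconds:  aws.Int64(-180),
                            TimeExpression: aws.String("from_unixtime(ingest_time)"), // hypothetical time expression
                        },
                    }},
                },
            }},
            // A late data rule sends a CloudWatch notification when data arrives
            // after the session window closes; TimeoutInMinutes is assumed from
            // this release's model.
            LateDataRules: []*iotanalytics.LateDataRule{{
                RuleName: aws.String("notifyLateArrivals"),
                RuleConfiguration: &iotanalytics.LateDataRuleConfiguration{
                    DeltaTimeSessionWindowConfiguration: &iotanalytics.DeltaTimeSessionWindowConfiguration{
                        TimeoutInMinutes: aws.Int64(60),
                    },
                },
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
    }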
ChannelSummaries []*ChannelSummary `locationName:"channelSummaries" type:"list"` // The token to retrieve the next set of results, or null if there are no more @@ -7755,7 +8020,7 @@ func (s *ListDatasetsInput) SetNextToken(v string) *ListDatasetsInput { type ListDatasetsOutput struct { _ struct{} `type:"structure"` - // A list of "DatasetSummary" objects. + // A list of DatasetSummary objects. DatasetSummaries []*DatasetSummary `locationName:"datasetSummaries" type:"list"` // The token to retrieve the next set of results, or null if there are no more @@ -7835,7 +8100,7 @@ func (s *ListDatastoresInput) SetNextToken(v string) *ListDatastoresInput { type ListDatastoresOutput struct { _ struct{} `type:"structure"` - // A list of "DatastoreSummary" objects. + // A list of DatastoreSummary objects. DatastoreSummaries []*DatastoreSummary `locationName:"datastoreSummaries" type:"list"` // The token to retrieve the next set of results, or null if there are no more @@ -7919,7 +8184,7 @@ type ListPipelinesOutput struct { // results. NextToken *string `locationName:"nextToken" type:"string"` - // A list of "PipelineSummary" objects. + // A list of PipelineSummary objects. PipelineSummaries []*PipelineSummary `locationName:"pipelineSummaries" type:"list"` } @@ -7989,7 +8254,7 @@ func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResource type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // The tags (metadata) which you have assigned to the resource. + // The tags (metadata) that you have assigned to the resource. Tags []*Tag `locationName:"tags" min:"1" type:"list"` } @@ -8018,7 +8283,7 @@ type LoggingOptions struct { // Enabled is a required field Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` - // The logging level. Currently, only "ERROR" is supported. + // The logging level. Currently, only ERROR is supported. // // Level is a required field Level *string `locationName:"level" type:"string" required:"true" enum:"LoggingLevel"` @@ -8095,7 +8360,7 @@ type MathActivity struct { // Math is a required field Math *string `locationName:"math" min:"1" type:"string" required:"true"` - // The name of the 'math' activity. + // The name of the math activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -8173,15 +8438,15 @@ func (s *MathActivity) SetNext(v string) *MathActivity { type Message struct { _ struct{} `type:"structure"` - // The ID you wish to assign to the message. Each "messageId" must be unique - // within each batch sent. + // The ID you want to assign to the message. Each messageId must be unique within + // each batch sent. // // MessageId is a required field MessageId *string `locationName:"messageId" min:"1" type:"string" required:"true"` - // The payload of the message. This may be a JSON string or a Base64-encoded - // string representing binary data (in which case you must decode it by means - // of a pipeline activity). + // The payload of the message. This can be a JSON string or a base64-encoded + // string representing binary data, in which case you must decode it by means + // of a pipeline activity. // // Payload is automatically base64 encoded/decoded by the SDK. 
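The Message type above requires a messageId that is unique within the batch and a payload that the SDK base64-encodes on the wire. A small sketch of sending a batch with BatchPutMessage follows; the channel name and payloads are hypothetical.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iotanalytics"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := iotanalytics.New(sess)

        out, err := svc.BatchPutMessage(&iotanalytics.BatchPutMessageInput{
            ChannelName: aws.String("sensor_channel"), // hypothetical channel
            Messages: []*iotanalytics.Message{
                {
                    // Each messageId must be unique within the batch.
                    MessageId: aws.String("msg-0001"),
                    // Payload is passed as raw bytes; the SDK base64-encodes it
                    // automatically when sending the request.
                    Payload: []byte(`{"temperature": 22.5}`),
                },
                {
                    MessageId: aws.String("msg-0002"),
                    Payload:   []byte(`{"temperature": 19.8}`),
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        // Any messages the service could not store are reported here.
        fmt.Println(len(out.BatchPutMessageErrorEntries), "failed messages")
    }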
// @@ -8234,7 +8499,7 @@ func (s *Message) SetPayload(v []byte) *Message { type OutputFileUriValue struct { _ struct{} `type:"structure"` - // The URI of the location where data set contents are stored, usually the URI + // The URI of the location where dataset contents are stored, usually the URI // of a file in an S3 bucket. // // FileName is a required field @@ -8355,7 +8620,7 @@ type PipelineActivity struct { // Adds data from the AWS IoT device registry to your message. DeviceRegistryEnrich *DeviceRegistryEnrichActivity `locationName:"deviceRegistryEnrich" type:"structure"` - // Adds information from the AWS IoT Device Shadows service to a message. + // Adds information from the AWS IoT Device Shadow service to a message. DeviceShadowEnrich *DeviceShadowEnrichActivity `locationName:"deviceShadowEnrich" type:"structure"` // Filters a message based on its attributes. @@ -8614,8 +8879,8 @@ func (s PutLoggingOptionsOutput) GoString() string { return s.String() } -// Information which is used to filter message data, to segregate it according -// to the time frame in which it arrives. +// Information that is used to filter message data, to segregate it according +// to the timeframe in which it arrives. type QueryFilter struct { _ struct{} `type:"structure"` @@ -8664,7 +8929,7 @@ type RemoveAttributesActivity struct { // Attributes is a required field Attributes []*string `locationName:"attributes" min:"1" type:"list" required:"true"` - // The name of the 'removeAttributes' activity. + // The name of the removeAttributes activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -8733,7 +8998,7 @@ type ReprocessingSummary struct { // The time the pipeline reprocessing was created. CreationTime *time.Time `locationName:"creationTime" type:"timestamp"` - // The 'reprocessingId' returned by "StartPipelineReprocessing". + // The reprocessingId returned by StartPipelineReprocessing. Id *string `locationName:"id" type:"string"` // The status of the pipeline reprocessing. @@ -8830,18 +9095,18 @@ func (s *ResourceAlreadyExistsException) RequestID() string { return s.RespMetadata.RequestID } -// The configuration of the resource used to execute the "containerAction". +// The configuration of the resource used to execute the containerAction. type ResourceConfiguration struct { _ struct{} `type:"structure"` - // The type of the compute resource used to execute the "containerAction". Possible - // values are: ACU_1 (vCPU=4, memory=16GiB) or ACU_2 (vCPU=8, memory=32GiB). + // The type of the compute resource used to execute the containerAction. Possible + // values are: ACU_1 (vCPU=4, memory=16 GiB) or ACU_2 (vCPU=8, memory=32 GiB). // // ComputeType is a required field ComputeType *string `locationName:"computeType" type:"string" required:"true" enum:"ComputeType"` - // The size (in GB) of the persistent storage available to the resource instance - // used to execute the "containerAction" (min: 1, max: 50). + // The size, in GB, of the persistent storage available to the resource instance + // used to execute the containerAction (min: 1, max: 50). // // VolumeSizeInGB is a required field VolumeSizeInGB *int64 `locationName:"volumeSizeInGB" min:"1" type:"integer" required:"true"` @@ -8948,7 +9213,7 @@ func (s *ResourceNotFoundException) RequestID() string { type RetentionPeriod struct { _ struct{} `type:"structure"` - // The number of days that message data is kept. The "unlimited" parameter must + // The number of days that message data is kept. 
The unlimited parameter must // be false. NumberOfDays *int64 `locationName:"numberOfDays" min:"1" type:"integer"` @@ -8999,11 +9264,11 @@ type RunPipelineActivityInput struct { // Payloads is a required field Payloads [][]byte `locationName:"payloads" min:"1" type:"list" required:"true"` - // The pipeline activity that is run. This must not be a 'channel' activity - // or a 'datastore' activity because these activities are used in a pipeline - // only to load the original message and to store the (possibly) transformed - // message. If a 'lambda' activity is specified, only short-running Lambda functions - // (those with a timeout of less than 30 seconds or less) can be used. + // The pipeline activity that is run. This must not be a channel activity or + // a datastore activity because these activities are used in a pipeline only + // to load the original message and to store the (possibly) transformed message. + // If a lambda activity is specified, only short-running Lambda functions (those + // with a timeout of less than 30 seconds or less) can be used. // // PipelineActivity is a required field PipelineActivity *PipelineActivity `locationName:"pipelineActivity" type:"structure" required:"true"` @@ -9089,29 +9354,45 @@ func (s *RunPipelineActivityOutput) SetPayloads(v [][]byte) *RunPipelineActivity return s } -// Configuration information for delivery of data set contents to Amazon S3. +// Configuration information for delivery of dataset contents to Amazon Simple +// Storage Service (Amazon S3). type S3DestinationConfiguration struct { _ struct{} `type:"structure"` - // The name of the Amazon S3 bucket to which data set contents are delivered. + // The name of the S3 bucket to which dataset contents are delivered. // // Bucket is a required field Bucket *string `locationName:"bucket" min:"3" type:"string" required:"true"` - // Configuration information for coordination with the AWS Glue ETL (extract, - // transform and load) service. + // Configuration information for coordination with AWS Glue, a fully managed + // extract, transform and load (ETL) service. GlueConfiguration *GlueConfiguration `locationName:"glueConfiguration" type:"structure"` - // The key of the data set contents object. Each object in an Amazon S3 bucket - // has a key that is its unique identifier within the bucket (each object in - // a bucket has exactly one key). To produce a unique key, you can use "!{iotanalytics:scheduledTime}" - // to insert the time of the scheduled SQL query run, or "!{iotanalytics:versioned} - // to insert a unique hash identifying the data set, for example: "/DataSet/!{iotanalytics:scheduledTime}/!{iotanalytics:versioned}.csv". + // The key of the dataset contents object in an S3 bucket. Each object has a + // key that is a unique identifier. Each object has exactly one key. + // + // You can create a unique key with the following options: + // + // * Use !{iotanalytics:scheduleTime} to insert the time of a scheduled SQL + // query run. + // + // * Use !{iotanalytics:versionId} to insert a unique hash that identifies + // a dataset content. + // + // * Use !{iotanalytics:creationTime} to insert the creation time of a dataset + // content. + // + // The following example creates a unique key for a CSV file: dataset/mydataset/!{iotanalytics:scheduleTime}/!{iotanalytics:versionId}.csv + // + // If you don't use !{iotanalytics:versionId} to specify the key, you might + // get duplicate keys. 
For example, you might have two dataset contents with + // the same scheduleTime but different versionIds. This means that one dataset + // content overwrites the other. // // Key is a required field Key *string `locationName:"key" min:"1" type:"string" required:"true"` - // The ARN of the role which grants AWS IoT Analytics permission to interact + // The ARN of the role that grants AWS IoT Analytics permission to interact // with your Amazon S3 and AWS Glue resources. // // RoleArn is a required field @@ -9196,7 +9477,7 @@ type SampleChannelDataInput struct { // The end of the time window from which sample messages are retrieved. EndTime *time.Time `location:"querystring" locationName:"endTime" type:"timestamp"` - // The number of sample messages to be retrieved. The limit is 10, the default + // The number of sample messages to be retrieved. The limit is 10. The default // is also 10. MaxMessages *int64 `location:"querystring" locationName:"maxMessages" min:"1" type:"integer"` @@ -9317,7 +9598,7 @@ type SelectAttributesActivity struct { // Attributes is a required field Attributes []*string `locationName:"attributes" min:"1" type:"list" required:"true"` - // The name of the 'selectAttributes' activity. + // The name of the selectAttributes activity. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -9379,9 +9660,9 @@ func (s *SelectAttributesActivity) SetNext(v string) *SelectAttributesActivity { return s } -// Use this to store channel data in an S3 bucket managed by the AWS IoT Analytics -// service. The choice of service-managed or customer-managed S3 storage cannot -// be changed after creation of the channel. +// Use this to store channel data in an S3 bucket managed by AWS IoT Analytics. +// You cannot change the choice of service-managed or customer-managed S3 storage +// after the channel is created. type ServiceManagedChannelS3Storage struct { _ struct{} `type:"structure"` } @@ -9396,8 +9677,7 @@ func (s ServiceManagedChannelS3Storage) GoString() string { return s.String() } -// Used to store channel data in an S3 bucket managed by the AWS IoT Analytics -// service. +// Used to store channel data in an S3 bucket managed by AWS IoT Analytics. type ServiceManagedChannelS3StorageSummary struct { _ struct{} `type:"structure"` } @@ -9412,9 +9692,9 @@ func (s ServiceManagedChannelS3StorageSummary) GoString() string { return s.String() } -// Use this to store data store data in an S3 bucket managed by the AWS IoT -// Analytics service. The choice of service-managed or customer-managed S3 storage -// cannot be changed after creation of the data store. +// Use this to store data store data in an S3 bucket managed by AWS IoT Analytics. +// You cannot change the choice of service-managed or customer-managed S3 storage +// after the data store is created. type ServiceManagedDatastoreS3Storage struct { _ struct{} `type:"structure"` } @@ -9429,8 +9709,7 @@ func (s ServiceManagedDatastoreS3Storage) GoString() string { return s.String() } -// Used to store data store data in an S3 bucket managed by the AWS IoT Analytics -// service. +// Used to store data store data in an S3 bucket managed by AWS IoT Analytics. type ServiceManagedDatastoreS3StorageSummary struct { _ struct{} `type:"structure"` } @@ -9505,7 +9784,7 @@ func (s *ServiceUnavailableException) RequestID() string { type SqlQueryDatasetAction struct { _ struct{} `type:"structure"` - // Pre-filters applied to message data. + // Prefilters applied to message data. 
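The S3DestinationConfiguration notes above recommend including !{iotanalytics:versionId} in the object key so that dataset contents sharing a scheduleTime do not overwrite each other. The sketch below builds a content delivery rule that way; the bucket, role ARN, and Glue names are hypothetical, and the DatasetContentDeliveryRule and DatasetContentDeliveryDestination wrapper types are assumed from this SDK version rather than shown in this hunk.

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/iotanalytics"
    )

    // newS3DeliveryRule builds a dataset content delivery rule whose object key
    // includes both the scheduled run time and the content version, following the
    // key guidance in the S3DestinationConfiguration documentation above.
    func newS3DeliveryRule() *iotanalytics.DatasetContentDeliveryRule {
        return &iotanalytics.DatasetContentDeliveryRule{
            EntryName: aws.String("toS3"),
            Destination: &iotanalytics.DatasetContentDeliveryDestination{
                S3DestinationConfiguration: &iotanalytics.S3DestinationConfiguration{
                    Bucket: aws.String("example-analytics-results"), // hypothetical bucket
                    // versionId keeps contents with the same scheduleTime from
                    // overwriting each other.
                    Key:     aws.String("dataset/mydataset/!{iotanalytics:scheduleTime}/!{iotanalytics:versionId}.csv"),
                    RoleArn: aws.String("arn:aws:iam::123456789012:role/example-iotanalytics-role"),
                    GlueConfiguration: &iotanalytics.GlueConfiguration{
                        DatabaseName: aws.String("example_db"),
                        TableName:    aws.String("example_table"),
                    },
                },
            },
        }
    }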
Filters []*QueryFilter `locationName:"filters" type:"list"` // A SQL query string. @@ -9641,7 +9920,7 @@ func (s *StartPipelineReprocessingOutput) SetReprocessingId(v string) *StartPipe return s } -// A set of key/value pairs which are used to manage the resource. +// A set of key-value pairs that are used to manage the resource. type Tag struct { _ struct{} `type:"structure"` @@ -9838,12 +10117,12 @@ func (s *ThrottlingException) RequestID() string { return s.RespMetadata.RequestID } -// Information about the data set whose content generation triggers the new -// data set content generation. +// Information about the dataset whose content generation triggers the new dataset +// content generation. type TriggeringDataset struct { _ struct{} `type:"structure"` - // The name of the data set whose content generation triggers the new data set + // The name of the dataset whose content generation triggers the new dataset // content generation. // // Name is a required field @@ -9962,9 +10241,9 @@ type UpdateChannelInput struct { // ChannelName is a required field ChannelName *string `location:"uri" locationName:"channelName" min:"1" type:"string" required:"true"` - // Where channel data is stored. You may choose one of "serviceManagedS3" or - // "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". - // This cannot be changed after creation of the channel. + // Where channel data is stored. You can choose one of serviceManagedS3 or customerManagedS3 + // storage. If not specified, the default is serviceManagedS3. You cannot change + // this storage option after the channel is created. ChannelStorage *ChannelStorage `locationName:"channelStorage" type:"structure"` // How long, in days, message data is kept for the channel. The retention period @@ -10043,12 +10322,12 @@ func (s UpdateChannelOutput) GoString() string { type UpdateDatasetInput struct { _ struct{} `type:"structure"` - // A list of "DatasetAction" objects. + // A list of DatasetAction objects. // // Actions is a required field Actions []*DatasetAction `locationName:"actions" min:"1" type:"list" required:"true"` - // When data set contents are created they are delivered to destinations specified + // When dataset contents are created, they are delivered to destinations specified // here. ContentDeliveryRules []*DatasetContentDeliveryRule `locationName:"contentDeliveryRules" type:"list"` @@ -10057,17 +10336,25 @@ type UpdateDatasetInput struct { // DatasetName is a required field DatasetName *string `location:"uri" locationName:"datasetName" min:"1" type:"string" required:"true"` - // How long, in days, data set contents are kept for the data set. + // A list of data rules that send notifications to Amazon CloudWatch, when data + // arrives late. To specify lateDataRules, the dataset must use a DeltaTimer + // (https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html) + // filter. + LateDataRules []*LateDataRule `locationName:"lateDataRules" min:"1" type:"list"` + + // How long, in days, dataset contents are kept for the dataset. RetentionPeriod *RetentionPeriod `locationName:"retentionPeriod" type:"structure"` - // A list of "DatasetTrigger" objects. The list can be empty or can contain - // up to five DataSetTrigger objects. + // A list of DatasetTrigger objects. The list can be empty or can contain up + // to five DatasetTrigger objects. Triggers []*DatasetTrigger `locationName:"triggers" type:"list"` - // [Optional] How many versions of data set contents are kept. 
If not specified + // Optional. How many versions of dataset contents are kept. If not specified // or set to null, only the latest version plus the latest succeeded version - // (if they are different) are kept for the time period specified by the "retentionPeriod" - // parameter. (For more information, see https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + // (if they are different) are kept for the time period specified by the retentionPeriod + // parameter. For more information, see Keeping Multiple Versions of AWS IoT + // Analytics Data Sets (https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html#aws-iot-analytics-dataset-versions) + // in the AWS IoT Analytics User Guide. VersioningConfiguration *VersioningConfiguration `locationName:"versioningConfiguration" type:"structure"` } @@ -10096,6 +10383,9 @@ func (s *UpdateDatasetInput) Validate() error { if s.DatasetName != nil && len(*s.DatasetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DatasetName", 1)) } + if s.LateDataRules != nil && len(s.LateDataRules) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LateDataRules", 1)) + } if s.Actions != nil { for i, v := range s.Actions { if v == nil { @@ -10116,6 +10406,16 @@ func (s *UpdateDatasetInput) Validate() error { } } } + if s.LateDataRules != nil { + for i, v := range s.LateDataRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LateDataRules", i), err.(request.ErrInvalidParams)) + } + } + } if s.RetentionPeriod != nil { if err := s.RetentionPeriod.Validate(); err != nil { invalidParams.AddNested("RetentionPeriod", err.(request.ErrInvalidParams)) @@ -10161,6 +10461,12 @@ func (s *UpdateDatasetInput) SetDatasetName(v string) *UpdateDatasetInput { return s } +// SetLateDataRules sets the LateDataRules field's value. +func (s *UpdateDatasetInput) SetLateDataRules(v []*LateDataRule) *UpdateDatasetInput { + s.LateDataRules = v + return s +} + // SetRetentionPeriod sets the RetentionPeriod field's value. func (s *UpdateDatasetInput) SetRetentionPeriod(v *RetentionPeriod) *UpdateDatasetInput { s.RetentionPeriod = v @@ -10201,9 +10507,9 @@ type UpdateDatastoreInput struct { // DatastoreName is a required field DatastoreName *string `location:"uri" locationName:"datastoreName" min:"1" type:"string" required:"true"` - // Where data store data is stored. You may choose one of "serviceManagedS3" - // or "customerManagedS3" storage. If not specified, the default is "serviceManagedS3". - // This cannot be changed after the data store is created. + // Where data store data is stored. You can choose one of serviceManagedS3 or + // customerManagedS3 storage. If not specified, the default isserviceManagedS3. + // You cannot change this storage option after the data store is created. DatastoreStorage *DatastoreStorage `locationName:"datastoreStorage" type:"structure"` // How long, in days, message data is kept for the data store. The retention @@ -10282,15 +10588,15 @@ func (s UpdateDatastoreOutput) GoString() string { type UpdatePipelineInput struct { _ struct{} `type:"structure"` - // A list of "PipelineActivity" objects. 
Activities perform transformations - // on your messages, such as removing, renaming or adding message attributes; - // filtering messages based on attribute values; invoking your Lambda functions - // on messages for advanced processing; or performing mathematical transformations - // to normalize device data. + // A list of PipelineActivity objects. Activities perform transformations on + // your messages, such as removing, renaming or adding message attributes; filtering + // messages based on attribute values; invoking your Lambda functions on messages + // for advanced processing; or performing mathematical transformations to normalize + // device data. // // The list can be 2-25 PipelineActivity objects and must contain both a channel - // and a datastore activity. Each entry in the list must contain only one activity, - // for example: + // and a datastore activity. Each entry in the list must contain only one activity. + // For example: // // pipelineActivities = [ { "channel": { ... } }, { "lambda": { ... } }, ... // ] @@ -10372,13 +10678,13 @@ func (s UpdatePipelineOutput) GoString() string { return s.String() } -// An instance of a variable to be passed to the "containerAction" execution. -// Each variable must have a name and a value given by one of "stringValue", -// "datasetContentVersionValue", or "outputFileUriValue". +// An instance of a variable to be passed to the containerAction execution. +// Each variable must have a name and a value given by one of stringValue, datasetContentVersionValue, +// or outputFileUriValue. type Variable struct { _ struct{} `type:"structure"` - // The value of the variable as a structure that specifies a data set content + // The value of the variable as a structure that specifies a dataset content // version. DatasetContentVersionValue *DatasetContentVersionValue `locationName:"datasetContentVersionValue" type:"structure"` @@ -10463,15 +10769,15 @@ func (s *Variable) SetStringValue(v string) *Variable { return s } -// Information about the versioning of data set contents. +// Information about the versioning of dataset contents. type VersioningConfiguration struct { _ struct{} `type:"structure"` - // How many versions of data set contents will be kept. The "unlimited" parameter - // must be false. + // How many versions of dataset contents are kept. The unlimited parameter must + // be false. MaxVersions *int64 `locationName:"maxVersions" min:"1" type:"integer"` - // If true, unlimited versions of data set contents will be kept. + // If true, unlimited versions of dataset contents are kept. Unlimited *bool `locationName:"unlimited" type:"boolean"` } diff --git a/service/macie2/api.go b/service/macie2/api.go index d2a84ab639f..b6e029ed5e2 100644 --- a/service/macie2/api.go +++ b/service/macie2/api.go @@ -8583,8 +8583,8 @@ type DescribeClassificationJobOutput struct { Tags map[string]*string `locationName:"tags" type:"map"` // Provides information about when a classification job was paused and when - // it will expire and be cancelled if it isn’t resumed. This object is present - // only if a job’s current status (jobStatus) is USER_PAUSED. + // it will expire and be cancelled if it isn't resumed. This object is present + // only if a job's current status (jobStatus) is USER_PAUSED. 
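The UpdatePipelineInput documentation above requires a list of 2 to 25 activities that contains both a channel and a datastore activity. A sketch of an update that inserts a lambda step between the two follows; all resource and function names are hypothetical, and the batch size only illustrates the five-minute processing window noted for LambdaActivity.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iotanalytics"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := iotanalytics.New(sess)

        // The activity list must contain both a channel and a datastore activity;
        // the lambda activity is chained between them via Next.
        _, err := svc.UpdatePipeline(&iotanalytics.UpdatePipelineInput{
            PipelineName: aws.String("temperature_pipeline"),
            PipelineActivities: []*iotanalytics.PipelineActivity{
                {Channel: &iotanalytics.ChannelActivity{
                    Name:        aws.String("ingest"),
                    ChannelName: aws.String("sensor_channel"),
                    Next:        aws.String("enrich"),
                }},
                {Lambda: &iotanalytics.LambdaActivity{
                    Name:       aws.String("enrich"),
                    LambdaName: aws.String("enrich-sensor-data"), // hypothetical function
                    // The function must process each batch within five minutes.
                    BatchSize: aws.Int64(100),
                    Next:      aws.String("store"),
                }},
                {Datastore: &iotanalytics.DatastoreActivity{
                    Name:          aws.String("store"),
                    DatastoreName: aws.String("sensor_datastore"),
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }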
UserPausedDetails *UserPausedDetails `locationName:"userPausedDetails" type:"structure"` } @@ -11005,8 +11005,8 @@ type JobSummary struct { Name *string `locationName:"name" type:"string"` // Provides information about when a classification job was paused and when - // it will expire and be cancelled if it isn’t resumed. This object is present - // only if a job’s current status (jobStatus) is USER_PAUSED. + // it will expire and be cancelled if it isn't resumed. This object is present + // only if a job's current status (jobStatus) is USER_PAUSED. UserPausedDetails *UserPausedDetails `locationName:"userPausedDetails" type:"structure"` } @@ -12016,13 +12016,13 @@ type Occurrences struct { Cells []*Cell `locationName:"cells" type:"list"` // Provides details about the location of occurrences of sensitive data in an - // Adobe Portable Document Format file, Apache Avro object container, Microsoft - // Word document, or non-binary text file. + // Adobe Portable Document Format file, Microsoft Word document, or non-binary + // text file. LineRanges []*Range `locationName:"lineRanges" type:"list"` // Provides details about the location of occurrences of sensitive data in an - // Adobe Portable Document Format file, Apache Avro object container, Microsoft - // Word document, or non-binary text file. + // Adobe Portable Document Format file, Microsoft Word document, or non-binary + // text file. OffsetRanges []*Range `locationName:"offsetRanges" type:"list"` // Specifies the location of occurrences of sensitive data in an Adobe Portable @@ -12080,13 +12080,13 @@ type Page struct { _ struct{} `type:"structure"` // Provides details about the location of an occurrence of sensitive data in - // an Adobe Portable Document Format file, Apache Avro object container, Microsoft - // Word document, or non-binary text file. + // an Adobe Portable Document Format file, Microsoft Word document, or non-binary + // text file. LineRange *Range `locationName:"lineRange" type:"structure"` // Provides details about the location of an occurrence of sensitive data in - // an Adobe Portable Document Format file, Apache Avro object container, Microsoft - // Word document, or non-binary text file. + // an Adobe Portable Document Format file, Microsoft Word document, or non-binary + // text file. OffsetRange *Range `locationName:"offsetRange" type:"structure"` PageNumber *int64 `locationName:"pageNumber" type:"long"` @@ -12231,8 +12231,8 @@ func (s *PutClassificationExportConfigurationOutput) SetConfiguration(v *Classif } // Provides details about the location of an occurrence of sensitive data in -// an Adobe Portable Document Format file, Apache Avro object container, Microsoft -// Word document, or non-binary text file. +// an Adobe Portable Document Format file, Microsoft Word document, or non-binary +// text file. type Range struct { _ struct{} `type:"structure"` @@ -12271,11 +12271,13 @@ func (s *Range) SetStartColumn(v int64) *Range { return s } -// Specifies the location of an occurrence of sensitive data in an Apache Parquet -// file. +// Specifies the location of an occurrence of sensitive data in an Apache Avro +// object container or Apache Parquet file. type Record struct { _ struct{} `type:"structure"` + JsonPath *string `locationName:"jsonPath" type:"string"` + RecordIndex *int64 `locationName:"recordIndex" type:"long"` } @@ -12289,6 +12291,12 @@ func (s Record) GoString() string { return s.String() } +// SetJsonPath sets the JsonPath field's value. 
+func (s *Record) SetJsonPath(v string) *Record { + s.JsonPath = &v + return s +} + // SetRecordIndex sets the RecordIndex field's value. func (s *Record) SetRecordIndex(v int64) *Record { s.RecordIndex = &v @@ -14371,8 +14379,8 @@ func (s *UserIdentityRoot) SetPrincipalId(v string) *UserIdentityRoot { } // Provides information about when a classification job was paused and when -// it will expire and be cancelled if it isn’t resumed. This object is present -// only if a job’s current status (jobStatus) is USER_PAUSED. +// it will expire and be cancelled if it isn't resumed. This object is present +// only if a job's current status (jobStatus) is USER_PAUSED. type UserPausedDetails struct { _ struct{} `type:"structure"` diff --git a/service/s3/api.go b/service/s3/api.go index bcf7f0344c5..2ab5d1dad25 100644 --- a/service/s3/api.go +++ b/service/s3/api.go @@ -1212,6 +1212,106 @@ func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBuc return out, req.Send() } +const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" + +// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketIntelligentTieringConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + output = &DeleteBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. 
+// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -2798,6 +2898,105 @@ func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEnc return out, req.Send() } +const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" + +// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketIntelligentTieringConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &GetBucketIntelligentTieringConfigurationInput{} + } + + output = &GetBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). 
+// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -4314,9 +4513,10 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // For more information about returning the ACL of an object, see GetObjectAcl // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). // -// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE -// storage classes, before you can retrieve the object you must first restore -// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// If the object you are retrieving is stored in the S3 Glacier, S3 Glacier +// Deep Archive, S3 Intelligent-Tiering Archive, or S3 Intelligent-Tiering Deep +// Archive storage classes, before you can retrieve the object you must first +// restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). // Otherwise, this operation returns an InvalidObjectStateError error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). 
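The GetObject changes above extend the archived-object note to the new S3 Intelligent-Tiering Archive tiers and add the InvalidObjectState error code. A sketch of detecting that error and deferring to a restore request follows; the bucket and key are hypothetical.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        out, err := svc.GetObject(&s3.GetObjectInput{
            Bucket: aws.String("example-bucket"), // hypothetical bucket
            Key:    aws.String("archived/report.csv"),
        })
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeInvalidObjectState {
            // The object is archived; it must be restored before it can be read.
            fmt.Println("object is archived; issue a RestoreObject request first")
            return
        }
        if err != nil {
            log.Fatal(err)
        }
        defer out.Body.Close()
        fmt.Println("object retrieved")
    }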
// @@ -4429,6 +4629,9 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // +// * ErrCodeInvalidObjectState "InvalidObjectState" +// Object is archived and inaccessible until restored. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) @@ -5379,6 +5582,105 @@ func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input return out, req.Send() } +const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" + +// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketIntelligentTieringConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. +// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketIntelligentTieringConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &ListBucketIntelligentTieringConfigurationsInput{} + } + + output = &ListBucketIntelligentTieringConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. 
For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketIntelligentTieringConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the @@ -7066,6 +7368,106 @@ func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEnc return out, req.Send() } +const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration" + +// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketIntelligentTieringConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &PutBucketIntelligentTieringConfigurationInput{} + } + + output = &PutBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. S3 Intelligent-Tiering delivers +// automatic cost savings by moving data between access tiers, when access patterns +// change. +// +// The S3 Intelligent-Tiering storage class is suitable for objects larger than +// 128 KB that you plan to store for at least 30 days. If the size of an object +// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the frequent access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// If you delete an object before the end of the 30-day minimum storage duration +// period, you are charged for 30 days. For more information, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// * DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketIntelligentTieringConfiguration for usage and error information. 
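A hedged sketch of calling the new PutBucketIntelligentTieringConfiguration operation to enable the Archive Access and Deep Archive Access tiers follows. The IntelligentTieringConfiguration and Tiering shapes, the status value, and the access-tier strings are assumed from this release's S3 model rather than shown in this hunk, and the bucket name is hypothetical.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // Move objects not accessed for 90 days to the Archive Access tier and
        // objects not accessed for 180 days to the Deep Archive Access tier.
        _, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
            Bucket: aws.String("example-bucket"), // hypothetical bucket
            Id:     aws.String("archive-config"),
            IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
                Id:     aws.String("archive-config"),
                Status: aws.String("Enabled"),
                Tierings: []*s3.Tiering{
                    {AccessTier: aws.String("ARCHIVE_ACCESS"), Days: aws.Int64(90)},
                    {AccessTier: aws.String("DEEP_ARCHIVE_ACCESS"), Days: aws.Int64(180)},
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }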
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the @@ -7950,14 +8352,14 @@ func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControls // PutBucketOwnershipControls API operation for Amazon Simple Storage Service. // // Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this -// operation, you must have the s3:GetBucketOwnershipControls permission. For +// operation, you must have the s3:PutBucketOwnershipControls permission. For // more information about Amazon S3 permissions, see Specifying Permissions // in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). // // For information about Amazon S3 Object Ownership, see Using Object Ownership // (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). // -// The following operations are related to GetBucketOwnershipControls: +// The following operations are related to PutBucketOwnershipControls: // // * GetBucketOwnershipControls // @@ -9599,58 +10001,56 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Restoring Archives // -// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To -// access an archived object, you must first initiate a restore request. This -// restores a temporary copy of the archived object. In a restore request, you -// specify the number of days that you want the restored copy to exist. After -// the specified period, Amazon S3 deletes the temporary copy but the object -// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object -// was restored from. +// Objects that you archive to the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering +// Archive, or S3 Intelligent-Tiering Deep Archive storage classes are not accessible +// in real time. For objects in Archive Access tier or Deep Archive Access tier +// you must first initiate a restore request, and then wait until the object +// is moved into the Frequent Access tier. 
For objects in S3 Glacier or S3 Glacier +// Deep Archive you must first initiate a restore request, and then wait until +// a temporary copy of the object is available. To access an archived object, +// you must restore the object for the duration (number of days) that you specify. // // To restore a specific object version, you can provide a version ID. If you // don't provide a version ID, Amazon S3 restores the current version. // -// The time it takes restore jobs to finish depends on which storage class the -// object is being restored from and which data access tier you specify. -// // When restoring an archived object (or using a select request), you can specify // one of the following data access tier options in the Tier element of the // request body: // // * Expedited - Expedited retrievals allow you to quickly access your data -// stored in the GLACIER storage class when occasional urgent requests for -// a subset of archives are required. For all but the largest archived objects -// (250 MB+), data accessed using Expedited retrievals are typically made -// available within 1–5 minutes. Provisioned capacity ensures that retrieval -// capacity for Expedited retrievals is available when you need it. Expedited -// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE -// storage class. -// -// * Standard - S3 Standard retrievals allow you to access any of your archived -// objects within several hours. This is the default option for the GLACIER -// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval -// option. S3 Standard retrievals typically complete within 3-5 hours from -// the GLACIER storage class and typically complete within 12 hours from -// the DEEP_ARCHIVE storage class. -// -// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval -// option, enabling you to retrieve large amounts, even petabytes, of data -// inexpensively in a day. Bulk retrievals typically complete within 5-12 -// hours from the GLACIER storage class and typically complete within 48 -// hours from the DEEP_ARCHIVE storage class. +// stored in the S3 Glacier or S3 Intelligent-Tiering Archive storage class +// when occasional urgent requests for a subset of archives are required. +// For all but the largest archived objects (250 MB+), data accessed using +// Expedited retrievals is typically made available within 1–5 minutes. +// Provisioned capacity ensures that retrieval capacity for Expedited retrievals +// is available when you need it. Expedited retrievals and provisioned capacity +// are not available for objects stored in the S3 Glacier Deep Archive or +// S3 Intelligent-Tiering Deep Archive storage class. +// +// * Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for retrieval +// requests that do not specify the retrieval option. Standard retrievals +// typically finish within 3–5 hours for objects stored in the S3 Glacier +// or S3 Intelligent-Tiering Archive storage class. They typically finish +// within 12 hours for objects stored in the S3 Glacier Deep Archive or S3 +// Intelligent-Tiering Deep Archive storage class. Standard retrievals are +// free for objects stored in S3 Intelligent-Tiering. +// +// * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, +// enabling you to retrieve large amounts, even petabytes, of data inexpensively. 
+// Bulk retrievals typically finish within 5–12 hours for objects stored +// in the S3 Glacier or S3 Intelligent-Tiering Archive storage class. They +// typically finish within 48 hours for objects stored in the S3 Glacier +// Deep Archive or S3 Intelligent-Tiering Deep Archive storage class. Bulk +// retrievals are free for objects stored in S3 Intelligent-Tiering. // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) // in the Amazon Simple Storage Service Developer Guide. // // You can use Amazon S3 restore speed upgrade to change the restore speed to -// a faster speed while it is in progress. You upgrade the speed of an in-progress -// restoration by issuing another restore request to the same object, setting -// a new Tier request element. When issuing a request to upgrade the restore -// tier, you must choose a tier that is faster than the tier that the in-progress -// restore is using. You must not change any other parameters, such as the Days -// request element. For more information, see Upgrading the Speed of an In-Progress -// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// a faster speed while it is in progress. For more information, see Upgrading +// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) // in the Amazon Simple Storage Service Developer Guide. // // To get the status of object restoration, you can send a HEAD request. Operations @@ -9679,11 +10079,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // A successful operation returns either the 200 OK or 202 Accepted status code. // -// * If the object copy is not previously restored, then Amazon S3 returns -// 202 Accepted in the response. +// * If the object is not previously restored, then Amazon S3 returns 202 +// Accepted in the response. // -// * If the object copy is previously restored, Amazon S3 returns 200 OK -// in the response. +// * If the object is previously restored, Amazon S3 returns 200 OK in the +// response. // // Special Errors // @@ -9691,11 +10091,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // (This error does not apply to SELECT type requests.) HTTP Status Code: // 409 Conflict SOAP Fault Code Prefix: Client // -// * Code: GlacierExpeditedRetrievalNotAvailable Cause: S3 Glacier expedited -// retrievals are currently not available. Try again later. (Returned if -// there is insufficient capacity to process the Expedited request. This -// error applies only to Expedited retrievals and not to S3 Standard or Bulk -// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals +// are currently not available. Try again later. (Returned if there is insufficient +// capacity to process the Expedited request. This error applies only to +// Expedited retrievals and not to S3 Standard or Bulk retrievals.) 
HTTP +// Status Code: 503 SOAP Fault Code Prefix: N/A // // Related Resources // @@ -13919,6 +14319,110 @@ func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { return &s, nil } +type DeleteBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + type DeleteBucketInventoryConfigurationInput struct { _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` @@ -14269,6 +14773,9 @@ type DeleteBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -14819,24 +15326,25 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Specifies whether Amazon S3 replicates the delete markers. If you specify -// a Filter, you must specify this element. However, in the latest version of -// replication configuration (when Filter is specified), Amazon S3 doesn't replicate -// delete markers. Therefore, the DeleteMarkerReplication element can contain -// only Disabled. For an example configuration, see Basic Rule -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter +// in your replication configuration, you must also include a DeleteMarkerReplication +// element. If your Filter includes a Tag element, the DeleteMarkerReplication +// Status must be set to Disabled, because Amazon S3 does not support replicating +// delete markers for tag-based rules. For an example configuration, see Basic +// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). // -// If you don't specify the Filter element, Amazon S3 assumes that the replication -// configuration is the earlier version, V1. In the earlier version, Amazon -// S3 handled replication of delete markers differently. For more information, +// For more information about delete marker replication, see Basic Rule Configuration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). +// +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). type DeleteMarkerReplication struct { _ struct{} `type:"structure"` // Indicates whether to replicate delete markers. // - // In the current implementation, Amazon S3 doesn't replicate the delete markers. - // The status must be Disabled. + // Indicates whether to replicate delete markers. 
Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` } @@ -15597,9 +16105,8 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // A container specifying replication metrics-related settings enabling metrics - // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified - // together with a ReplicationTime block. + // A container specifying replication metrics-related settings enabling replication + // metrics and events. Metrics *Metrics `type:"structure"` // A container specifying S3 Replication Time Control (S3 RTC), including whether @@ -16928,6 +17435,119 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv return s } +type GetBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
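A companion sketch for reading back and then removing an S3 Intelligent-Tiering configuration by ID, using the Get and Delete input types added in this diff; the bucket name and configuration ID are placeholders and the standard SDK imports are assumed.

func showAndDeleteIntelligentTieringConfiguration() {
	svc := s3.New(session.New())

	// Read a configuration back by its ID.
	got, err := svc.GetBucketIntelligentTieringConfiguration(&s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("examplebucket"),
		Id:     aws.String("ExampleConfig"),
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(got.IntelligentTieringConfiguration)

	// Remove the same configuration.
	_, err = svc.DeleteBucketIntelligentTieringConfiguration(&s3.DeleteBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("examplebucket"),
		Id:     aws.String("ExampleConfig"),
	})
	if err != nil {
		fmt.Println(err)
	}
}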
+func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"` + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput { + s.IntelligentTieringConfiguration = v + return s +} + type GetBucketInventoryConfigurationInput struct { _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` @@ -17709,6 +18329,9 @@ type GetBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } @@ -20261,7 +20884,7 @@ func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *Public type GlacierJobParameters struct { _ struct{} `type:"structure"` - // S3 Glacier retrieval tier at which the restore will be processed. + // Retrieval tier at which the restore will be processed. // // Tier is a required field Tier *string `type:"string" required:"true" enum:"Tier"` @@ -20809,6 +21432,9 @@ type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + // The archive state of the head object. + ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` @@ -20980,6 +21606,12 @@ func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { return s } +// SetArchiveStatus sets the ArchiveStatus field's value. +func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { + s.ArchiveStatus = &v + return s +} + // SetCacheControl sets the CacheControl field's value. func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { s.CacheControl = &v @@ -21271,6 +21903,224 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { return s } +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the configuration applies. 
+ Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the configuration + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s IntelligentTieringAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { + s.Tags = v + return s +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter `type:"structure"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies the status of the configuration. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // Tierings is a required field + Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s IntelligentTieringConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
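A configuration can be scoped to part of a bucket with the filter types added here. The following sketch uses a placeholder ID, key prefix, and tag, and runs the client-side Validate check defined just below to surface any missing required fields before a request is sent.

func validateScopedConfiguration() {
	cfg := &s3.IntelligentTieringConfiguration{
		Id:     aws.String("docs-archive"), // placeholder ID
		Status: aws.String(s3.IntelligentTieringStatusEnabled),
		Filter: &s3.IntelligentTieringFilter{
			And: &s3.IntelligentTieringAndOperator{
				Prefix: aws.String("documents/"), // placeholder key prefix
				Tags: []*s3.Tag{
					{Key: aws.String("archive"), Value: aws.String("true")}, // placeholder tag
				},
			},
		},
		Tierings: []*s3.Tiering{
			{
				AccessTier: aws.String(s3.IntelligentTieringAccessTierDeepArchiveAccess),
				Days:       aws.Int64(180),
			},
		},
	}

	if err := cfg.Validate(); err != nil {
		fmt.Println(err)
	}
}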
+func (s *IntelligentTieringConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Tierings == nil { + invalidParams.Add(request.NewErrParamRequired("Tierings")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Tierings != nil { + for i, v := range s.Tierings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration { + s.Status = &v + return s +} + +// SetTierings sets the Tierings field's value. +func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration { + s.Tierings = v + return s +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration +// applies to. +type IntelligentTieringFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // A container of a key value name pair. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s IntelligentTieringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IntelligentTieringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
+func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { + s.Tag = v + return s +} + // Specifies the inventory configuration for an Amazon S3 bucket. For more information, // see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) // in the Amazon Simple Storage Service API Reference. @@ -22331,6 +23181,147 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str return s } +type ListBucketIntelligentTieringConfigurationsInput struct { + _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.ContinuationToken = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
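Listing is paginated through the ContinuationToken, IsTruncated, and NextContinuationToken fields on the output type defined just below. A sketch of walking every configuration on a bucket (bucket name supplied by the caller, standard SDK imports assumed):

func listIntelligentTieringConfigurations(svc *s3.S3, bucket string) error {
	var token *string
	for {
		out, err := svc.ListBucketIntelligentTieringConfigurations(&s3.ListBucketIntelligentTieringConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return err
		}
		for _, cfg := range out.IntelligentTieringConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id), aws.StringValue(cfg.Status))
		}
		// Stop once the service reports the listing is complete.
		if !aws.BoolValue(out.IsTruncated) {
			return nil
		}
		token = out.NextContinuationToken
	}
}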
+func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `type:"string"` + + // The list of S3 Intelligent-Tiering configurations for a bucket. + IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { + s.IntelligentTieringConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + type ListBucketInventoryConfigurationsInput struct { _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` @@ -24548,17 +25539,14 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry { return s } -// A container specifying replication metrics-related settings enabling metrics -// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified -// together with a ReplicationTime block. +// A container specifying replication metrics-related settings enabling replication +// metrics and events. type Metrics struct { _ struct{} `type:"structure"` // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold // event. 
- // - // EventThreshold is a required field - EventThreshold *ReplicationTimeValue `type:"structure" required:"true"` + EventThreshold *ReplicationTimeValue `type:"structure"` // Specifies whether the replication metrics are enabled. // @@ -24579,9 +25567,6 @@ func (s Metrics) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Metrics) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Metrics"} - if s.EventThreshold == nil { - invalidParams.Add(request.NewErrParamRequired("EventThreshold")) - } if s.Status == nil { invalidParams.Add(request.NewErrParamRequired("Status")) } @@ -25852,8 +26837,8 @@ type PublicAccessBlockConfiguration struct { // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only - // AWS services and authorized users within this account if the bucket has a - // public policy. + // AWS service principals and authorized users within this account if the bucket + // has a public policy. // // Enabling this setting doesn't affect previously stored bucket policies, except // that public and cross-account access within any public bucket policy, including @@ -26556,6 +27541,129 @@ func (s PutBucketEncryptionOutput) GoString() string { return s.String() } +type PutBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Container for S3 Intelligent-Tiering configuration. + // + // IntelligentTieringConfiguration is a required field + IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IntelligentTieringConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) + } + if s.IntelligentTieringConfiguration != nil { + if err := s.IntelligentTieringConfiguration.Validate(); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { + s.IntelligentTieringConfiguration = v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + type PutBucketInventoryConfigurationInput struct { _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` @@ -27409,6 +28517,9 @@ type PutBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want @@ -27661,6 +28772,7 @@ type PutBucketReplicationInput struct { // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // A token to allow Object Lock to be enabled for an existing bucket. Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -30211,16 +31323,18 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo type ReplicationRule struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 replicates the delete markers. If you specify - // a Filter, you must specify this element. However, in the latest version of - // replication configuration (when Filter is specified), Amazon S3 doesn't replicate - // delete markers. Therefore, the DeleteMarkerReplication element can contain - // only Disabled. For an example configuration, see Basic Rule - // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + // in your replication configuration, you must also include a DeleteMarkerReplication + // element. If your Filter includes a Tag element, the DeleteMarkerReplication + // Status must be set to Disabled, because Amazon S3 does not support replicating + // delete markers for tag-based rules. For an example configuration, see Basic + // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // For more information about delete marker replication, see Basic Rule Configuration + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). // - // If you don't specify the Filter element, Amazon S3 assumes that the replication - // configuration is the earlier version, V1. In the earlier version, Amazon - // S3 handled replication of delete markers differently. For more information, + // If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` @@ -30666,7 +31780,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { type RestoreObjectInput struct { _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` - // The bucket name or containing the object to restore. + // The bucket name containing the object to restore. // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. @@ -30857,6 +31971,9 @@ type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify // OutputLocation. 
+ // + // The Days element is required for regular restores, and must not be provided + // for select requests. Days *int64 `type:"integer"` // The optional description for the job. @@ -30872,7 +31989,7 @@ type RestoreRequest struct { // Describes the parameters for Select job types. SelectParameters *SelectParameters `type:"structure"` - // S3 Glacier retrieval tier at which the restore will be processed. + // Retrieval tier at which the restore will be processed. Tier *string `type:"string" enum:"Tier"` // Type of restore request. @@ -32348,6 +33465,65 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { return s } +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + _ struct{} `type:"structure"` + + // S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing + // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // AccessTier is a required field + AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` + + // The number of days that you want your archived data to be accessible. The + // minimum number of days specified in the restore request must be at least + // 90 days. If a smaller value is specifed it will be ignored. + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Tiering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tiering) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tiering) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tiering"} + if s.AccessTier == nil { + invalidParams.Add(request.NewErrParamRequired("AccessTier")) + } + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *Tiering) SetAccessTier(v string) *Tiering { + s.AccessTier = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Tiering) SetDays(v int64) *Tiering { + s.Days = &v + return s +} + // A container for specifying the configuration for publication of messages // to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 // detects specified events. 
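Tying the restore-related changes together: HeadObject now surfaces x-amz-archive-status, and RestoreObject takes the Days and Tier elements described above. A sketch under the assumption of a caller-supplied bucket and key, checking the archive status and then restoring an S3 Glacier object with the low-cost Bulk tier:

func restoreArchivedObject(svc *s3.S3, bucket, key string) error {
	// Report the archive status added to HeadObjectOutput in this change.
	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	fmt.Println("archive status:", aws.StringValue(head.ArchiveStatus))

	// Request a temporary copy for 7 days using the Bulk retrieval tier.
	_, err = svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(7),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierBulk),
			},
		},
	})
	return err
}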
@@ -33406,6 +34582,22 @@ func AnalyticsS3ExportFileFormat_Values() []string { } } +const ( + // ArchiveStatusArchiveAccess is a ArchiveStatus enum value + ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" + + // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value + ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// ArchiveStatus_Values returns all elements of the ArchiveStatus enum +func ArchiveStatus_Values() []string { + return []string{ + ArchiveStatusArchiveAccess, + ArchiveStatusDeepArchiveAccess, + } +} + const ( // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value BucketAccelerateStatusEnabled = "Enabled" @@ -33801,6 +34993,38 @@ func FilterRuleName_Values() []string { } } +const ( + // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" + + // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum +func IntelligentTieringAccessTier_Values() []string { + return []string{ + IntelligentTieringAccessTierArchiveAccess, + IntelligentTieringAccessTierDeepArchiveAccess, + } +} + +const ( + // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusEnabled = "Enabled" + + // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusDisabled = "Disabled" +) + +// IntelligentTieringStatus_Values returns all elements of the IntelligentTieringStatus enum +func IntelligentTieringStatus_Values() []string { + return []string{ + IntelligentTieringStatusEnabled, + IntelligentTieringStatusDisabled, + } +} + const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" diff --git a/service/s3/errors.go b/service/s3/errors.go index dd73d460cf3..f64b55135ee 100644 --- a/service/s3/errors.go +++ b/service/s3/errors.go @@ -21,6 +21,12 @@ const ( // bucket access control lists (ACLs). ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + // ErrCodeInvalidObjectState for service response error code + // "InvalidObjectState". + // + // Object is archived and inaccessible until restored. + ErrCodeInvalidObjectState = "InvalidObjectState" + // ErrCodeNoSuchBucket for service response error code // "NoSuchBucket". // diff --git a/service/s3/examples_test.go b/service/s3/examples_test.go index ba50e449fd3..157e7538729 100644 --- a/service/s3/examples_test.go +++ b/service/s3/examples_test.go @@ -411,14 +411,14 @@ func ExampleS3_DeleteBucketWebsite_shared00() { fmt.Println(result) } -// To delete an object (from a non-versioned bucket) +// To delete an object // -// The following example deletes an object from a non-versioned bucket. +// The following example deletes an object from an S3 bucket. func ExampleS3_DeleteObject_shared00() { svc := s3.New(session.New()) input := &s3.DeleteObjectInput{ - Bucket: aws.String("ExampleBucket"), - Key: aws.String("HappyFace.jpg"), + Bucket: aws.String("examplebucket"), + Key: aws.String("objectkey.jpg"), } result, err := svc.DeleteObject(input) @@ -439,14 +439,14 @@ func ExampleS3_DeleteObject_shared00() { fmt.Println(result) } -// To delete an object +// To delete an object (from a non-versioned bucket) // -// The following example deletes an object from an S3 bucket. 
+// The following example deletes an object from a non-versioned bucket. func ExampleS3_DeleteObject_shared01() { svc := s3.New(session.New()) input := &s3.DeleteObjectInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("objectkey.jpg"), + Bucket: aws.String("ExampleBucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.DeleteObject(input) @@ -934,14 +934,16 @@ func ExampleS3_GetBucketWebsite_shared00() { fmt.Println(result) } -// To retrieve an object +// To retrieve a byte range of an object // -// The following example retrieves an object for an S3 bucket. +// The following example retrieves an object for an S3 bucket. The request specifies +// the range header to retrieve a specific byte range. func ExampleS3_GetObject_shared00() { svc := s3.New(session.New()) input := &s3.GetObjectInput{ Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Key: aws.String("SampleFile.txt"), + Range: aws.String("bytes=0-9"), } result, err := svc.GetObject(input) @@ -950,6 +952,8 @@ func ExampleS3_GetObject_shared00() { switch aerr.Code() { case s3.ErrCodeNoSuchKey: fmt.Println(s3.ErrCodeNoSuchKey, aerr.Error()) + case s3.ErrCodeInvalidObjectState: + fmt.Println(s3.ErrCodeInvalidObjectState, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -964,16 +968,14 @@ func ExampleS3_GetObject_shared00() { fmt.Println(result) } -// To retrieve a byte range of an object +// To retrieve an object // -// The following example retrieves an object for an S3 bucket. The request specifies -// the range header to retrieve a specific byte range. +// The following example retrieves an object for an S3 bucket. func ExampleS3_GetObject_shared01() { svc := s3.New(session.New()) input := &s3.GetObjectInput{ Bucket: aws.String("examplebucket"), - Key: aws.String("SampleFile.txt"), - Range: aws.String("bytes=0-9"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.GetObject(input) @@ -982,6 +984,8 @@ func ExampleS3_GetObject_shared01() { switch aerr.Code() { case s3.ErrCodeNoSuchKey: fmt.Println(s3.ErrCodeNoSuchKey, aerr.Error()) + case s3.ErrCodeInvalidObjectState: + fmt.Println(s3.ErrCodeInvalidObjectState, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -1026,16 +1030,14 @@ func ExampleS3_GetObjectAcl_shared00() { fmt.Println(result) } -// To retrieve tag set of a specific object version +// To retrieve tag set of an object // -// The following example retrieves tag set of an object. The request specifies object -// version. +// The following example retrieves tag set of an object. func ExampleS3_GetObjectTagging_shared00() { svc := s3.New(session.New()) input := &s3.GetObjectTaggingInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.GetObjectTagging(input) @@ -1056,14 +1058,16 @@ func ExampleS3_GetObjectTagging_shared00() { fmt.Println(result) } -// To retrieve tag set of an object +// To retrieve tag set of a specific object version // -// The following example retrieves tag set of an object. +// The following example retrieves tag set of an object. The request specifies object +// version. 
func ExampleS3_GetObjectTagging_shared01() { svc := s3.New(session.New()) input := &s3.GetObjectTaggingInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), } result, err := svc.GetObjectTagging(input) @@ -1804,17 +1808,20 @@ func ExampleS3_PutBucketWebsite_shared00() { fmt.Println(result) } -// To upload an object +// To upload object and specify user-defined metadata // -// The following example uploads an object to a versioning-enabled bucket. The source -// file is specified using Windows file syntax. S3 returns VersionId of the newly created -// object. +// The following example creates an object. The request also specifies optional metadata. +// If the bucket is versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared00() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Key: aws.String("exampleobject"), + Metadata: map[string]*string{ + "metadata1": aws.String("value1"), + "metadata2": aws.String("value2"), + }, } result, err := svc.PutObject(input) @@ -1835,20 +1842,16 @@ func ExampleS3_PutObject_shared00() { fmt.Println(result) } -// To upload object and specify user-defined metadata +// To create an object. // -// The following example creates an object. The request also specifies optional metadata. -// If the bucket is versioning enabled, S3 returns version ID in response. +// The following example creates an object. If the bucket is versioning enabled, S3 +// returns version ID in response. func ExampleS3_PutObject_shared01() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - Metadata: map[string]*string{ - "metadata1": aws.String("value1"), - "metadata2": aws.String("value2"), - }, + Key: aws.String("objectkey"), } result, err := svc.PutObject(input) @@ -1869,19 +1872,17 @@ func ExampleS3_PutObject_shared01() { fmt.Println(result) } -// To upload an object and specify server-side encryption and object tags +// To upload an object and specify optional tags // -// The following example uploads and object. The request specifies the optional server-side -// encryption option. The request also specifies optional object tags. If the bucket -// is versioning enabled, S3 returns version ID in response. +// The following example uploads an object. The request specifies optional object tags. +// The bucket is versioned, therefore S3 returns version ID of the newly created object. 
func ExampleS3_PutObject_shared02() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - ServerSideEncryption: aws.String("AES256"), - Tagging: aws.String("key1=value1&key2=value2"), + Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + Tagging: aws.String("key1=value1&key2=value2"), } result, err := svc.PutObject(input) @@ -1902,17 +1903,19 @@ func ExampleS3_PutObject_shared02() { fmt.Println(result) } -// To upload an object and specify optional tags +// To upload an object and specify server-side encryption and object tags // -// The following example uploads an object. The request specifies optional object tags. -// The bucket is versioned, therefore S3 returns version ID of the newly created object. +// The following example uploads and object. The request specifies the optional server-side +// encryption option. The request also specifies optional object tags. If the bucket +// is versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared03() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - Tagging: aws.String("key1=value1&key2=value2"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + ServerSideEncryption: aws.String("AES256"), + Tagging: aws.String("key1=value1&key2=value2"), } result, err := svc.PutObject(input) @@ -1933,18 +1936,18 @@ func ExampleS3_PutObject_shared03() { fmt.Println(result) } -// To upload an object (specify optional headers) +// To upload an object and specify canned ACL. // -// The following example uploads an object. The request specifies optional request headers -// to directs S3 to use specific storage class and use server-side encryption. +// The following example uploads and object. The request specifies optional canned ACL +// (access control list) to all READ access to authenticated users. If the bucket is +// versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared04() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - ServerSideEncryption: aws.String("AES256"), - StorageClass: aws.String("STANDARD_IA"), + ACL: aws.String("authenticated-read"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), } result, err := svc.PutObject(input) @@ -1965,18 +1968,18 @@ func ExampleS3_PutObject_shared04() { fmt.Println(result) } -// To upload an object and specify canned ACL. +// To upload an object (specify optional headers) // -// The following example uploads and object. The request specifies optional canned ACL -// (access control list) to all READ access to authenticated users. If the bucket is -// versioning enabled, S3 returns version ID in response. +// The following example uploads an object. The request specifies optional request headers +// to directs S3 to use specific storage class and use server-side encryption. 
func ExampleS3_PutObject_shared05() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - ACL: aws.String("authenticated-read"), - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + ServerSideEncryption: aws.String("AES256"), + StorageClass: aws.String("STANDARD_IA"), } result, err := svc.PutObject(input) @@ -1997,16 +2000,17 @@ func ExampleS3_PutObject_shared05() { fmt.Println(result) } -// To create an object. +// To upload an object // -// The following example creates an object. If the bucket is versioning enabled, S3 -// returns version ID in response. +// The following example uploads an object to a versioning-enabled bucket. The source +// file is specified using Windows file syntax. S3 returns VersionId of the newly created +// object. func ExampleS3_PutObject_shared06() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), Bucket: aws.String("examplebucket"), - Key: aws.String("objectkey"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.PutObject(input) diff --git a/service/s3/s3iface/interface.go b/service/s3/s3iface/interface.go index bca091d758e..7c622187843 100644 --- a/service/s3/s3iface/interface.go +++ b/service/s3/s3iface/interface.go @@ -96,6 +96,10 @@ type S3API interface { DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) + DeleteBucketIntelligentTieringConfiguration(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.DeleteBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationRequest(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) + DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) @@ -164,6 +168,10 @@ type S3API interface { GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error) GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) + GetBucketIntelligentTieringConfiguration(*s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.GetBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + 
GetBucketIntelligentTieringConfigurationRequest(*s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) + GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) @@ -272,6 +280,10 @@ type S3API interface { ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) + ListBucketIntelligentTieringConfigurations(*s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsWithContext(aws.Context, *s3.ListBucketIntelligentTieringConfigurationsInput, ...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsRequest(*s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) + ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) @@ -339,6 +351,10 @@ type S3API interface { PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error) PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) + PutBucketIntelligentTieringConfiguration(*s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.PutBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationRequest(*s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) + PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) diff --git a/service/ssm/api.go b/service/ssm/api.go index 8750afbcaa8..5d85ceb1a81 100644 --- a/service/ssm/api.go +++ b/service/ssm/api.go @@ -15297,8 +15297,6 @@ type AssociationFilter struct { // The name of the filter. // - // InstanceId has been deprecated. 
- // // Key is a required field Key *string `locationName:"key" type:"string" required:"true" enum:"AssociationFilterKey"` @@ -34019,11 +34017,6 @@ type ListAssociationsInput struct { _ struct{} `type:"structure"` // One or more filters. Use a filter to return a more specific list of results. - // - // Filtering associations using the InstanceID attribute only returns legacy - // associations created using the InstanceID attribute. Associations targeting - // the instance that are part of the Target Attributes ResourceGroup or Tags - // are not returned. AssociationFilterList []*AssociationFilter `min:"1" type:"list"` // The maximum number of items to return for this call. The call also returns @@ -47704,6 +47697,9 @@ const ( // AutomationExecutionFilterKeyTagKey is a AutomationExecutionFilterKey enum value AutomationExecutionFilterKeyTagKey = "TagKey" + + // AutomationExecutionFilterKeyTargetResourceGroup is a AutomationExecutionFilterKey enum value + AutomationExecutionFilterKeyTargetResourceGroup = "TargetResourceGroup" ) // AutomationExecutionFilterKey_Values returns all elements of the AutomationExecutionFilterKey enum @@ -47718,6 +47714,7 @@ func AutomationExecutionFilterKey_Values() []string { AutomationExecutionFilterKeyStartTimeAfter, AutomationExecutionFilterKeyAutomationType, AutomationExecutionFilterKeyTagKey, + AutomationExecutionFilterKeyTargetResourceGroup, } } diff --git a/service/storagegateway/api.go b/service/storagegateway/api.go index 65fbaedb38b..e70963b0a4d 100644 --- a/service/storagegateway/api.go +++ b/service/storagegateway/api.go @@ -2768,6 +2768,107 @@ func (c *StorageGateway) DescribeBandwidthRateLimitWithContext(ctx aws.Context, return out, req.Send() } +const opDescribeBandwidthRateLimitSchedule = "DescribeBandwidthRateLimitSchedule" + +// DescribeBandwidthRateLimitScheduleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBandwidthRateLimitSchedule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBandwidthRateLimitSchedule for more information on using the DescribeBandwidthRateLimitSchedule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeBandwidthRateLimitScheduleRequest method. +// req, resp := client.DescribeBandwidthRateLimitScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeBandwidthRateLimitSchedule +func (c *StorageGateway) DescribeBandwidthRateLimitScheduleRequest(input *DescribeBandwidthRateLimitScheduleInput) (req *request.Request, output *DescribeBandwidthRateLimitScheduleOutput) { + op := &request.Operation{ + Name: opDescribeBandwidthRateLimitSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBandwidthRateLimitScheduleInput{} + } + + output = &DescribeBandwidthRateLimitScheduleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeBandwidthRateLimitSchedule API operation for AWS Storage Gateway. 
+// +// Returns information about the bandwidth rate limit schedule of a gateway. +// By default, gateways do not have bandwidth rate limit schedules, which means +// no bandwidth rate limiting is in effect. This operation is supported only +// in the volume and tape gateway types. +// +// This operation returns information about a gateway's bandwidth rate limit +// schedule. A bandwidth rate limit schedule consists of one or more bandwidth +// rate limit intervals. A bandwidth rate limit interval defines a period of +// time on one or more days of the week, during which bandwidth rate limits +// are specified for uploading, downloading, or both. +// +// A bandwidth rate limit interval consists of one or more days of the week, +// a start hour and minute, an ending hour and minute, and bandwidth rate limits +// for uploading and downloading +// +// If no bandwidth rate limit schedule intervals are set for the gateway, this +// operation returns an empty response. To specify which gateway to describe, +// use the Amazon Resource Name (ARN) of the gateway in your request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation DescribeBandwidthRateLimitSchedule for usage and error information. +// +// Returned Error Types: +// * InvalidGatewayRequestException +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * InternalServerError +// An internal server error has occurred during the request. For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeBandwidthRateLimitSchedule +func (c *StorageGateway) DescribeBandwidthRateLimitSchedule(input *DescribeBandwidthRateLimitScheduleInput) (*DescribeBandwidthRateLimitScheduleOutput, error) { + req, out := c.DescribeBandwidthRateLimitScheduleRequest(input) + return out, req.Send() +} + +// DescribeBandwidthRateLimitScheduleWithContext is the same as DescribeBandwidthRateLimitSchedule with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBandwidthRateLimitSchedule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) DescribeBandwidthRateLimitScheduleWithContext(ctx aws.Context, input *DescribeBandwidthRateLimitScheduleInput, opts ...request.Option) (*DescribeBandwidthRateLimitScheduleOutput, error) { + req, out := c.DescribeBandwidthRateLimitScheduleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeCache = "DescribeCache" // DescribeCacheRequest generates a "aws/request.Request" representing the @@ -5311,6 +5412,12 @@ func (c *StorageGateway) ListTapePoolsRequest(input *ListTapePoolsInput) (req *r Name: opListTapePools, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -5372,6 +5479,58 @@ func (c *StorageGateway) ListTapePoolsWithContext(ctx aws.Context, input *ListTa return out, req.Send() } +// ListTapePoolsPages iterates over the pages of a ListTapePools operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTapePools method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTapePools operation. +// pageNum := 0 +// err := client.ListTapePoolsPages(params, +// func(page *storagegateway.ListTapePoolsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *StorageGateway) ListTapePoolsPages(input *ListTapePoolsInput, fn func(*ListTapePoolsOutput, bool) bool) error { + return c.ListTapePoolsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTapePoolsPagesWithContext same as ListTapePoolsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) ListTapePoolsPagesWithContext(ctx aws.Context, input *ListTapePoolsInput, fn func(*ListTapePoolsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTapePoolsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTapePoolsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTapePoolsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTapes = "ListTapes" // ListTapesRequest generates a "aws/request.Request" representing the @@ -7072,6 +7231,94 @@ func (c *StorageGateway) UpdateBandwidthRateLimitWithContext(ctx aws.Context, in return out, req.Send() } +const opUpdateBandwidthRateLimitSchedule = "UpdateBandwidthRateLimitSchedule" + +// UpdateBandwidthRateLimitScheduleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBandwidthRateLimitSchedule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBandwidthRateLimitSchedule for more information on using the UpdateBandwidthRateLimitSchedule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UpdateBandwidthRateLimitScheduleRequest method. +// req, resp := client.UpdateBandwidthRateLimitScheduleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateBandwidthRateLimitSchedule +func (c *StorageGateway) UpdateBandwidthRateLimitScheduleRequest(input *UpdateBandwidthRateLimitScheduleInput) (req *request.Request, output *UpdateBandwidthRateLimitScheduleOutput) { + op := &request.Operation{ + Name: opUpdateBandwidthRateLimitSchedule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBandwidthRateLimitScheduleInput{} + } + + output = &UpdateBandwidthRateLimitScheduleOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBandwidthRateLimitSchedule API operation for AWS Storage Gateway. +// +// Updates the bandwidth rate limit schedule for a specified gateway. By default, +// gateways do not have bandwidth rate limit schedules, which means no bandwidth +// rate limiting is in effect. Use this to initiate or update a gateway's bandwidth +// rate limit schedule. This operation is supported in the volume and tape gateway +// types. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation UpdateBandwidthRateLimitSchedule for usage and error information. +// +// Returned Error Types: +// * InvalidGatewayRequestException +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * InternalServerError +// An internal server error has occurred during the request. For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateBandwidthRateLimitSchedule +func (c *StorageGateway) UpdateBandwidthRateLimitSchedule(input *UpdateBandwidthRateLimitScheduleInput) (*UpdateBandwidthRateLimitScheduleOutput, error) { + req, out := c.UpdateBandwidthRateLimitScheduleRequest(input) + return out, req.Send() +} + +// UpdateBandwidthRateLimitScheduleWithContext is the same as UpdateBandwidthRateLimitSchedule with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBandwidthRateLimitSchedule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) UpdateBandwidthRateLimitScheduleWithContext(ctx aws.Context, input *UpdateBandwidthRateLimitScheduleInput, opts ...request.Option) (*UpdateBandwidthRateLimitScheduleOutput, error) { + req, out := c.UpdateBandwidthRateLimitScheduleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opUpdateChapCredentials = "UpdateChapCredentials" // UpdateChapCredentialsRequest generates a "aws/request.Request" representing the @@ -8967,6 +9214,142 @@ func (s *AutomaticTapeCreationRule) SetWorm(v bool) *AutomaticTapeCreationRule { return s } +// Describes a bandwidth rate limit interval for a gateway. A bandwidth rate +// limit schedule consists of one or more bandwidth rate limit intervals. A +// bandwidth rate limit interval defines a period of time on one or more days +// of the week, during which bandwidth rate limits are specified for uploading, +// downloading, or both. +type BandwidthRateLimitInterval struct { + _ struct{} `type:"structure"` + + // The average download rate limit component of the bandwidth rate limit interval, + // in bits per second. This field does not appear in the response if the download + // rate limit is not set. + AverageDownloadRateLimitInBitsPerSec *int64 `min:"102400" type:"long"` + + // The average upload rate limit component of the bandwidth rate limit interval, + // in bits per second. This field does not appear in the response if the upload + // rate limit is not set. + AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"` + + // The days of the week component of the bandwidth rate limit interval, represented + // as ordinal numbers from 0 to 6, where 0 represents Sunday and 6 Saturday. + // + // DaysOfWeek is a required field + DaysOfWeek []*int64 `min:"1" type:"list" required:"true"` + + // The hour of the day to end the bandwidth rate limit interval. + // + // EndHourOfDay is a required field + EndHourOfDay *int64 `type:"integer" required:"true"` + + // The minute of the hour to end the bandwidth rate limit interval. + // + // The bandwidth rate limit interval ends at the end of the minute. To end an + // interval at the end of an hour, use the value 59. + // + // EndMinuteOfHour is a required field + EndMinuteOfHour *int64 `type:"integer" required:"true"` + + // The hour of the day to start the bandwidth rate limit interval. + // + // StartHourOfDay is a required field + StartHourOfDay *int64 `type:"integer" required:"true"` + + // The minute of the hour to start the bandwidth rate limit interval. The interval + // begins at the start of that minute. To begin an interval exactly at the start + // of the hour, use the value 0. + // + // StartMinuteOfHour is a required field + StartMinuteOfHour *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s BandwidthRateLimitInterval) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BandwidthRateLimitInterval) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BandwidthRateLimitInterval) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BandwidthRateLimitInterval"} + if s.AverageDownloadRateLimitInBitsPerSec != nil && *s.AverageDownloadRateLimitInBitsPerSec < 102400 { + invalidParams.Add(request.NewErrParamMinValue("AverageDownloadRateLimitInBitsPerSec", 102400)) + } + if s.AverageUploadRateLimitInBitsPerSec != nil && *s.AverageUploadRateLimitInBitsPerSec < 51200 { + invalidParams.Add(request.NewErrParamMinValue("AverageUploadRateLimitInBitsPerSec", 51200)) + } + if s.DaysOfWeek == nil { + invalidParams.Add(request.NewErrParamRequired("DaysOfWeek")) + } + if s.DaysOfWeek != nil && len(s.DaysOfWeek) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DaysOfWeek", 1)) + } + if s.EndHourOfDay == nil { + invalidParams.Add(request.NewErrParamRequired("EndHourOfDay")) + } + if s.EndMinuteOfHour == nil { + invalidParams.Add(request.NewErrParamRequired("EndMinuteOfHour")) + } + if s.StartHourOfDay == nil { + invalidParams.Add(request.NewErrParamRequired("StartHourOfDay")) + } + if s.StartMinuteOfHour == nil { + invalidParams.Add(request.NewErrParamRequired("StartMinuteOfHour")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAverageDownloadRateLimitInBitsPerSec sets the AverageDownloadRateLimitInBitsPerSec field's value. +func (s *BandwidthRateLimitInterval) SetAverageDownloadRateLimitInBitsPerSec(v int64) *BandwidthRateLimitInterval { + s.AverageDownloadRateLimitInBitsPerSec = &v + return s +} + +// SetAverageUploadRateLimitInBitsPerSec sets the AverageUploadRateLimitInBitsPerSec field's value. +func (s *BandwidthRateLimitInterval) SetAverageUploadRateLimitInBitsPerSec(v int64) *BandwidthRateLimitInterval { + s.AverageUploadRateLimitInBitsPerSec = &v + return s +} + +// SetDaysOfWeek sets the DaysOfWeek field's value. +func (s *BandwidthRateLimitInterval) SetDaysOfWeek(v []*int64) *BandwidthRateLimitInterval { + s.DaysOfWeek = v + return s +} + +// SetEndHourOfDay sets the EndHourOfDay field's value. +func (s *BandwidthRateLimitInterval) SetEndHourOfDay(v int64) *BandwidthRateLimitInterval { + s.EndHourOfDay = &v + return s +} + +// SetEndMinuteOfHour sets the EndMinuteOfHour field's value. +func (s *BandwidthRateLimitInterval) SetEndMinuteOfHour(v int64) *BandwidthRateLimitInterval { + s.EndMinuteOfHour = &v + return s +} + +// SetStartHourOfDay sets the StartHourOfDay field's value. +func (s *BandwidthRateLimitInterval) SetStartHourOfDay(v int64) *BandwidthRateLimitInterval { + s.StartHourOfDay = &v + return s +} + +// SetStartMinuteOfHour sets the StartMinuteOfHour field's value. +func (s *BandwidthRateLimitInterval) SetStartMinuteOfHour(v int64) *BandwidthRateLimitInterval { + s.StartMinuteOfHour = &v + return s +} + // Lists refresh cache information. type CacheAttributes struct { _ struct{} `type:"structure"` @@ -12284,6 +12667,82 @@ func (s *DescribeBandwidthRateLimitOutput) SetGatewayARN(v string) *DescribeBand return s } +type DescribeBandwidthRateLimitScheduleInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. 
+ // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeBandwidthRateLimitScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBandwidthRateLimitScheduleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBandwidthRateLimitScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBandwidthRateLimitScheduleInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *DescribeBandwidthRateLimitScheduleInput) SetGatewayARN(v string) *DescribeBandwidthRateLimitScheduleInput { + s.GatewayARN = &v + return s +} + +type DescribeBandwidthRateLimitScheduleOutput struct { + _ struct{} `type:"structure"` + + // An array that contains the bandwidth rate limit intervals for a tape or volume + // gateway. + BandwidthRateLimitIntervals []*BandwidthRateLimitInterval `type:"list"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DescribeBandwidthRateLimitScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBandwidthRateLimitScheduleOutput) GoString() string { + return s.String() +} + +// SetBandwidthRateLimitIntervals sets the BandwidthRateLimitIntervals field's value. +func (s *DescribeBandwidthRateLimitScheduleOutput) SetBandwidthRateLimitIntervals(v []*BandwidthRateLimitInterval) *DescribeBandwidthRateLimitScheduleOutput { + s.BandwidthRateLimitIntervals = v + return s +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *DescribeBandwidthRateLimitScheduleOutput) SetGatewayARN(v string) *DescribeBandwidthRateLimitScheduleOutput { + s.GatewayARN = &v + return s +} + type DescribeCacheInput struct { _ struct{} `type:"structure"` @@ -18372,6 +18831,98 @@ func (s *UpdateBandwidthRateLimitOutput) SetGatewayARN(v string) *UpdateBandwidt return s } +type UpdateBandwidthRateLimitScheduleInput struct { + _ struct{} `type:"structure"` + + // An array containing bandwidth rate limit schedule intervals for a gateway. + // When no bandwidth rate limit intervals have been scheduled, the array is + // empty. + // + // BandwidthRateLimitIntervals is a required field + BandwidthRateLimitIntervals []*BandwidthRateLimitInterval `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. 
+ // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateBandwidthRateLimitScheduleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBandwidthRateLimitScheduleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateBandwidthRateLimitScheduleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBandwidthRateLimitScheduleInput"} + if s.BandwidthRateLimitIntervals == nil { + invalidParams.Add(request.NewErrParamRequired("BandwidthRateLimitIntervals")) + } + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.BandwidthRateLimitIntervals != nil { + for i, v := range s.BandwidthRateLimitIntervals { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "BandwidthRateLimitIntervals", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBandwidthRateLimitIntervals sets the BandwidthRateLimitIntervals field's value. +func (s *UpdateBandwidthRateLimitScheduleInput) SetBandwidthRateLimitIntervals(v []*BandwidthRateLimitInterval) *UpdateBandwidthRateLimitScheduleInput { + s.BandwidthRateLimitIntervals = v + return s +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *UpdateBandwidthRateLimitScheduleInput) SetGatewayARN(v string) *UpdateBandwidthRateLimitScheduleInput { + s.GatewayARN = &v + return s +} + +type UpdateBandwidthRateLimitScheduleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateBandwidthRateLimitScheduleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBandwidthRateLimitScheduleOutput) GoString() string { + return s.String() +} + +// SetGatewayARN sets the GatewayARN field's value. 
+func (s *UpdateBandwidthRateLimitScheduleOutput) SetGatewayARN(v string) *UpdateBandwidthRateLimitScheduleOutput { + s.GatewayARN = &v + return s +} + // A JSON object containing one or more of the following fields: // // * UpdateChapCredentialsInput$InitiatorName diff --git a/service/storagegateway/storagegatewayiface/interface.go b/service/storagegateway/storagegatewayiface/interface.go index 748e07f5699..cf3fb0bfe48 100644 --- a/service/storagegateway/storagegatewayiface/interface.go +++ b/service/storagegateway/storagegatewayiface/interface.go @@ -180,6 +180,10 @@ type StorageGatewayAPI interface { DescribeBandwidthRateLimitWithContext(aws.Context, *storagegateway.DescribeBandwidthRateLimitInput, ...request.Option) (*storagegateway.DescribeBandwidthRateLimitOutput, error) DescribeBandwidthRateLimitRequest(*storagegateway.DescribeBandwidthRateLimitInput) (*request.Request, *storagegateway.DescribeBandwidthRateLimitOutput) + DescribeBandwidthRateLimitSchedule(*storagegateway.DescribeBandwidthRateLimitScheduleInput) (*storagegateway.DescribeBandwidthRateLimitScheduleOutput, error) + DescribeBandwidthRateLimitScheduleWithContext(aws.Context, *storagegateway.DescribeBandwidthRateLimitScheduleInput, ...request.Option) (*storagegateway.DescribeBandwidthRateLimitScheduleOutput, error) + DescribeBandwidthRateLimitScheduleRequest(*storagegateway.DescribeBandwidthRateLimitScheduleInput) (*request.Request, *storagegateway.DescribeBandwidthRateLimitScheduleOutput) + DescribeCache(*storagegateway.DescribeCacheInput) (*storagegateway.DescribeCacheOutput, error) DescribeCacheWithContext(aws.Context, *storagegateway.DescribeCacheInput, ...request.Option) (*storagegateway.DescribeCacheOutput, error) DescribeCacheRequest(*storagegateway.DescribeCacheInput) (*request.Request, *storagegateway.DescribeCacheOutput) @@ -301,6 +305,9 @@ type StorageGatewayAPI interface { ListTapePoolsWithContext(aws.Context, *storagegateway.ListTapePoolsInput, ...request.Option) (*storagegateway.ListTapePoolsOutput, error) ListTapePoolsRequest(*storagegateway.ListTapePoolsInput) (*request.Request, *storagegateway.ListTapePoolsOutput) + ListTapePoolsPages(*storagegateway.ListTapePoolsInput, func(*storagegateway.ListTapePoolsOutput, bool) bool) error + ListTapePoolsPagesWithContext(aws.Context, *storagegateway.ListTapePoolsInput, func(*storagegateway.ListTapePoolsOutput, bool) bool, ...request.Option) error + ListTapes(*storagegateway.ListTapesInput) (*storagegateway.ListTapesOutput, error) ListTapesWithContext(aws.Context, *storagegateway.ListTapesInput, ...request.Option) (*storagegateway.ListTapesOutput, error) ListTapesRequest(*storagegateway.ListTapesInput) (*request.Request, *storagegateway.ListTapesOutput) @@ -375,6 +382,10 @@ type StorageGatewayAPI interface { UpdateBandwidthRateLimitWithContext(aws.Context, *storagegateway.UpdateBandwidthRateLimitInput, ...request.Option) (*storagegateway.UpdateBandwidthRateLimitOutput, error) UpdateBandwidthRateLimitRequest(*storagegateway.UpdateBandwidthRateLimitInput) (*request.Request, *storagegateway.UpdateBandwidthRateLimitOutput) + UpdateBandwidthRateLimitSchedule(*storagegateway.UpdateBandwidthRateLimitScheduleInput) (*storagegateway.UpdateBandwidthRateLimitScheduleOutput, error) + UpdateBandwidthRateLimitScheduleWithContext(aws.Context, *storagegateway.UpdateBandwidthRateLimitScheduleInput, ...request.Option) (*storagegateway.UpdateBandwidthRateLimitScheduleOutput, error) + UpdateBandwidthRateLimitScheduleRequest(*storagegateway.UpdateBandwidthRateLimitScheduleInput) 
(*request.Request, *storagegateway.UpdateBandwidthRateLimitScheduleOutput) + UpdateChapCredentials(*storagegateway.UpdateChapCredentialsInput) (*storagegateway.UpdateChapCredentialsOutput, error) UpdateChapCredentialsWithContext(aws.Context, *storagegateway.UpdateChapCredentialsInput, ...request.Option) (*storagegateway.UpdateChapCredentialsOutput, error) UpdateChapCredentialsRequest(*storagegateway.UpdateChapCredentialsInput) (*request.Request, *storagegateway.UpdateChapCredentialsOutput)
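
The s3iface additions above expose the new S3 Intelligent-Tiering configuration operations. The sketch below attaches an archive tiering configuration to a bucket and reads it back; the bucket name and configuration ID are placeholders, and the IntelligentTieringConfiguration and Tiering field names are assumed from the S3 API shapes shipped with this release rather than quoted from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Hypothetical bucket name and configuration ID.
	bucket := aws.String("examplebucket")
	configID := aws.String("archive-after-90-days")

	// Move objects that have not been accessed for 90 days to the new
	// Archive Access tier.
	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: bucket,
		Id:     configID,
		IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
			Id:     configID,
			Status: aws.String("Enabled"),
			Tierings: []*s3.Tiering{
				{
					AccessTier: aws.String("ARCHIVE_ACCESS"),
					Days:       aws.Int64(90),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the configuration back.
	out, err := svc.GetBucketIntelligentTieringConfiguration(&s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: bucket,
		Id:     configID,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.IntelligentTieringConfiguration)
}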
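
The SSM change adds TargetResourceGroup as an AutomationExecutionFilterKey value. A minimal sketch of filtering automation executions with it follows; the resource group name is a placeholder, and the DescribeAutomationExecutions call and the Values field of AutomationExecutionFilter come from the existing SSM API surface, not from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	// List automation executions that targeted a specific resource group,
	// using the new TargetResourceGroup filter key added in this release.
	out, err := svc.DescribeAutomationExecutions(&ssm.DescribeAutomationExecutionsInput{
		Filters: []*ssm.AutomationExecutionFilter{
			{
				Key:    aws.String(ssm.AutomationExecutionFilterKeyTargetResourceGroup),
				Values: []*string{aws.String("my-resource-group")}, // placeholder resource group name
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, exec := range out.AutomationExecutionMetadataList {
		fmt.Println(aws.StringValue(exec.AutomationExecutionId))
	}
}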
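
The Storage Gateway additions (UpdateBandwidthRateLimitSchedule, DescribeBandwidthRateLimitSchedule, and the BandwidthRateLimitInterval type) can be exercised end to end with the shapes defined in this diff. The sketch below sets a weekday upload throttle and then reads the schedule back; the gateway ARN, session setup, and interval values are illustrative placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	svc := storagegateway.New(session.Must(session.NewSession()))

	// Placeholder gateway ARN; substitute a real one from ListGateways.
	gatewayARN := aws.String("arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B")

	// Throttle uploads to 100 Mbit/s on weekdays between 09:00 and 17:59.
	// DaysOfWeek uses ordinals 0 (Sunday) through 6 (Saturday).
	interval := &storagegateway.BandwidthRateLimitInterval{
		DaysOfWeek:                         []*int64{aws.Int64(1), aws.Int64(2), aws.Int64(3), aws.Int64(4), aws.Int64(5)},
		StartHourOfDay:                     aws.Int64(9),
		StartMinuteOfHour:                  aws.Int64(0),
		EndHourOfDay:                       aws.Int64(17),
		EndMinuteOfHour:                    aws.Int64(59),
		AverageUploadRateLimitInBitsPerSec: aws.Int64(100 * 1000 * 1000),
	}

	_, err := svc.UpdateBandwidthRateLimitSchedule(&storagegateway.UpdateBandwidthRateLimitScheduleInput{
		GatewayARN:                  gatewayARN,
		BandwidthRateLimitIntervals: []*storagegateway.BandwidthRateLimitInterval{interval},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the schedule back; an empty slice means no schedule is in effect.
	out, err := svc.DescribeBandwidthRateLimitSchedule(&storagegateway.DescribeBandwidthRateLimitScheduleInput{
		GatewayARN: gatewayARN,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.BandwidthRateLimitIntervals)
}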