diff --git a/.changes/1.34.162.json b/.changes/1.34.162.json new file mode 100644 index 0000000000..03313bbe4a --- /dev/null +++ b/.changes/1.34.162.json @@ -0,0 +1,22 @@ +[ + { + "category": "``docdb``", + "description": "This release adds Global Cluster Failover capability which enables you to change your global cluster's primary AWS region, the region that serves writes, during a regional outage. Performing a failover action preserves your Global Cluster setup.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This release introduces a new ContainerDefinition configuration to support the customer-managed keys for ECS container restart feature.", + "type": "api-change" + }, + { + "category": "``iam``", + "description": "Make the LastUsedDate field in the GetAccessKeyLastUsed response optional. This may break customers who only call the API for access keys with a valid LastUsedDate. This fixes a deserialization issue for access keys without a LastUsedDate, because the field was marked as required but could be null.", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3c7ca0fd18..7779efcaa5 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,15 @@ CHANGELOG ========= +1.34.162 +======== + +* api-change:``docdb``: This release adds Global Cluster Failover capability which enables you to change your global cluster's primary AWS region, the region that serves writes, during a regional outage. Performing a failover action preserves your Global Cluster setup. +* api-change:``ecs``: This release introduces a new ContainerDefinition configuration to support the customer-managed keys for ECS container restart feature. +* api-change:``iam``: Make the LastUsedDate field in the GetAccessKeyLastUsed response optional. This may break customers who only call the API for access keys with a valid LastUsedDate. This fixes a deserialization issue for access keys without a LastUsedDate, because the field was marked as required but could be null. +* api-change:``s3``: Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API. + + 1.34.161 ======== diff --git a/botocore/__init__.py b/botocore/__init__.py index 2b552ca867..46e292e5b2 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.34.161' +__version__ = '1.34.162' class NullHandler(logging.Handler): diff --git a/botocore/data/docdb/2014-10-31/service-2.json b/botocore/data/docdb/2014-10-31/service-2.json index eda9de9b07..462207e810 100644 --- a/botocore/data/docdb/2014-10-31/service-2.json +++ b/botocore/data/docdb/2014-10-31/service-2.json @@ -640,6 +640,25 @@ ], "documentation":"
Forces a failover for a cluster.
A failover for a cluster promotes one of the Amazon DocumentDB replicas (read-only instances) in the cluster to be the primary instance (the cluster writer).
If the primary instance fails, Amazon DocumentDB automatically fails over to an Amazon DocumentDB replica, if one exists. You can force a failover when you want to simulate a failure of a primary instance for testing.
" }, + "FailoverGlobalCluster":{ + "name":"FailoverGlobalCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"FailoverGlobalClusterMessage"}, + "output":{ + "shape":"FailoverGlobalClusterResult", + "resultWrapper":"FailoverGlobalClusterResult" + }, + "errors":[ + {"shape":"GlobalClusterNotFoundFault"}, + {"shape":"InvalidGlobalClusterStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"Promotes the specified secondary DB cluster to be the primary DB cluster in the global cluster when failing over a global cluster occurs.
Use this operation to respond to an unplanned event, such as a regional disaster in the primary region. Failing over can result in a loss of write transaction data that wasn't replicated to the chosen secondary before the failover event occurred. However, the recovery process that promotes a DB instance on the chosen secondary DB cluster to be the primary writer DB instance guarantees that the data is in a transactionally consistent state.
" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -1798,6 +1817,12 @@ }, "exception":true }, + "DBClusterIdentifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z][0-9A-Za-z-:._]*" + }, "DBClusterList":{ "type":"list", "member":{ @@ -3362,6 +3387,37 @@ "DBCluster":{"shape":"DBCluster"} } }, + "FailoverGlobalClusterMessage":{ + "type":"structure", + "required":[ + "GlobalClusterIdentifier", + "TargetDbClusterIdentifier" + ], + "members":{ + "GlobalClusterIdentifier":{ + "shape":"GlobalClusterIdentifier", + "documentation":"The identifier of the Amazon DocumentDB global cluster to apply this operation. The identifier is the unique key assigned by the user when the cluster is created. In other words, it's the name of the global cluster.
Constraints:
Must match the identifier of an existing global cluster.
Minimum length of 1. Maximum length of 255.
Pattern: [A-Za-z][0-9A-Za-z-:._]*
The identifier of the secondary Amazon DocumentDB cluster that you want to promote to the primary for the global cluster. Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region.
Constraints:
Must match the identifier of an existing secondary cluster.
Minimum length of 1. Maximum length of 255.
Pattern: [A-Za-z][0-9A-Za-z-:._]*
Specifies whether to allow data loss for this global cluster operation. Allowing data loss triggers a global failover operation.
If you don't specify AllowDataLoss
, the global cluster operation defaults to a switchover.
Constraints:
Can't be specified together with the Switchover
parameter.
Specifies whether to switch over this global database cluster.
Constraints:
Can't be specified together with the AllowDataLoss
parameter.
The identifier of the Amazon DocumentDB global database cluster to switch over. The identifier is the unique key assigned by the user when the cluster is created. In other words, it's the name of the global cluster. This parameter isn’t case-sensitive.
Constraints:
Must match the identifier of an existing global cluster (Amazon DocumentDB global database).
Minimum length of 1. Maximum length of 255.
Pattern: [A-Za-z][0-9A-Za-z-:._]*
The identifier of the secondary Amazon DocumentDB cluster to promote to the new primary for the global database cluster. Use the Amazon Resource Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its Amazon Web Services region.
Constraints:
Must match the identifier of an existing secondary cluster.
Minimum length of 1. Maximum length of 255.
Pattern: [A-Za-z][0-9A-Za-z-:._]*
Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
" + "documentation":"Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
For information about the maximum number of task sets and other quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
" }, "DeleteAccountSetting":{ "name":"DeleteAccountSetting", @@ -653,7 +653,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"Registers a new task definition from the supplied family
and containerDefinitions
. Optionally, you can add data volumes to your containers with the volumes
parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.
You can specify a role for your task with the taskRoleArn
parameter. When you specify a role for a task, its containers can then use the latest versions of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the policy that's associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition with the networkMode
parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc
network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.
Registers a new task definition from the supplied family
and containerDefinitions
. Optionally, you can add data volumes to your containers with the volumes
parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.
You can specify a role for your task with the taskRoleArn
parameter. When you specify a role for a task, its containers can then use the latest versions of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the policy that's associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition with the networkMode
parameter. If you specify the awsvpc
network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.
Modifies the status of an Amazon ECS container instance.
Once a container instance has reached an ACTIVE
state, you can change the status of a container instance to DRAINING
to manually remove an instance from a cluster, for example to perform system updates, update the Docker daemon, or scale down the cluster size.
A container instance can't be changed to DRAINING
until it has reached an ACTIVE
status. If the instance is in any other status, an error will be received.
When you set a container instance to DRAINING
, Amazon ECS prevents new tasks from being scheduled for placement on the container instance and replacement service tasks are started on other container instances in the cluster if the resources are available. Service tasks on the container instance that are in the PENDING
state are stopped immediately.
Service tasks on the container instance that are in the RUNNING
state are stopped and replaced according to the service's deployment configuration parameters, minimumHealthyPercent
and maximumPercent
. You can change the deployment configuration of your service using UpdateService.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during task replacement. For example, desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler can't remove existing tasks until the replacement tasks are considered healthy. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during task replacement. You can use this to define the replacement batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four tasks to be drained, provided that the cluster resources required to do this are available. If the maximum is 100%, then replacement tasks can't start until the draining tasks have stopped.
Any PENDING
or RUNNING
tasks that do not belong to a service aren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
tasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to ACTIVE
status and once it has reached that status the Amazon ECS scheduler can begin scheduling tasks on the instance again.
Modifies the status of an Amazon ECS container instance.
Once a container instance has reached an ACTIVE
state, you can change the status of a container instance to DRAINING
to manually remove an instance from a cluster, for example to perform system updates, update the Docker daemon, or scale down the cluster size.
A container instance can't be changed to DRAINING
until it has reached an ACTIVE
status. If the instance is in any other status, an error will be received.
When you set a container instance to DRAINING
, Amazon ECS prevents new tasks from being scheduled for placement on the container instance and replacement service tasks are started on other container instances in the cluster if the resources are available. Service tasks on the container instance that are in the PENDING
state are stopped immediately.
Service tasks on the container instance that are in the RUNNING
state are stopped and replaced according to the service's deployment configuration parameters, minimumHealthyPercent
and maximumPercent
. You can change the deployment configuration of your service using UpdateService.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during task replacement. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler can't remove existing tasks until the replacement tasks are considered healthy. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during task replacement. You can use this to define the replacement batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four tasks to be drained, provided that the cluster resources required to do this are available. If the maximum is 100%, then replacement tasks can't start until the draining tasks have stopped.
Any PENDING
or RUNNING
tasks that do not belong to a service aren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
tasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to ACTIVE
status. Once it has reached that status, the Amazon ECS scheduler can begin scheduling tasks on the instance again.
Whether the task's elastic network interface receives a public IP address. The default value is DISABLED
.
An object representing the networking details for a task or service. For example awsvpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}
An object representing the networking details for a task or service. For example awsVpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}
.
Message that describes the cause of the exception.
" } }, - "documentation":"These errors are usually caused by a client action. This client action might be using an action or resource on behalf of a user that doesn't have permissions to use the action or resource. Or, it might be specifying an identifier that isn't valid.
", + "documentation":"These errors are usually caused by a client action. This client action might be using an action or resource on behalf of a user that doesn't have permissions to use the action or resource. Or, it might be specifying an identifier that isn't valid.
The following list includes additional causes for the error:
The RunTask
could not be processed because you use managed scaling and there is a capacity error because the quota of tasks in the PROVISIONING
per cluster has been reached. For information about the service quotas, see Amazon ECS service quotas.
The name of a container. If you're linking multiple containers together in a task definition, the name
of one container can be entered in the links
of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name
in the Create a container section of the Docker Remote API and the --name
option to docker run.
The name of a container. If you're linking multiple containers together in a task definition, the name
of one container can be entered in the links
of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name
in tthe docker create-container command and the --name
option to docker run.
The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag
or repository-url/image@digest
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the Create a container section of the Docker Remote API and the IMAGE
parameter of docker run.
When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.
Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag
or registry/repository@digest
. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest
or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE
.
Images in official repositories on Docker Hub use a single name (for example, ubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu
).
The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag
or repository-url/image@digest
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the docker create-container command and the IMAGE
parameter of docker run.
When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.
Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag
or registry/repository@digest
. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest
or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE
.
Images in official repositories on Docker Hub use a single name (for example, ubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu
).
The number of cpu
units reserved for the container. This parameter maps to CpuShares
in the Create a container section of the Docker Remote API and the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.
Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.
On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see CPU share constraint in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2. However, the CPU parameter isn't required, and you can use CPU values below 2 in your container definitions. For CPU values below 2 (including null), the behavior varies based on your Amazon ECS container agent version:
Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.
Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.
On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as 0
, which Windows interprets as 1% of one CPU.
The number of cpu
units reserved for the container. This parameter maps to CpuShares
in the docker create-container commandand the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.
Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.
On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:
Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.
Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.
Agent versions greater than or equal to 1.84.0: CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.
On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as 0
, which Windows interprets as 1% of one CPU.
The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory
value, if one is specified. This parameter maps to Memory
in the Create a container section of the Docker Remote API and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory
and memoryReservation
value, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.
The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
" + "documentation":"The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory
value, if one is specified. This parameter maps to Memory
in thethe docker create-container command and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory
and memoryReservation
value, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.
The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
" }, "memoryReservation":{ "shape":"BoxedInteger", - "documentation":"The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory
parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation
in the Create a container section of the Docker Remote API and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory
or memoryReservation
in a container definition. If you specify both, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory
is used.
For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation
of 128 MiB, and a memory
hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.
The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
" + "documentation":"The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory
parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation
in the the docker create-container command and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory
or memoryReservation
in a container definition. If you specify both, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory
is used.
For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation
of 128 MiB, and a memory
hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.
The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
" }, "links":{ "shape":"StringList", - "documentation":"The links
parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge
. The name:internalName
construct is analogous to name:alias
in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to Legacy container links in the Docker documentation. This parameter maps to Links
in the Create a container section of the Docker Remote API and the --link
option to docker run.
This parameter is not supported for Windows containers.
Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.
The links
parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge
. The name:internalName
construct is analogous to name:alias
in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to Links
in the docker create-container command and the --link
option to docker run.
This parameter is not supported for Windows containers.
Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.
The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.
For task definitions that use the awsvpc
network mode, only specify the containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than localhost
. There's no loopback for port mappings on Windows, so you can't access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the Create a container section of the Docker Remote API and the --publish
option to docker run. If the network mode of a task definition is set to none
, then you can't specify port mappings. If the network mode of a task definition is set to host
, then host ports must either be undefined or they must match the container port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings
section DescribeTasks responses.
The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.
For task definitions that use the awsvpc
network mode, only specify the containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than localhost
. There's no loopback for port mappings on Windows, so you can't access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the the docker create-container command and the --publish
option to docker run. If the network mode of a task definition is set to none
, then you can't specify port mappings. If the network mode of a task definition is set to host
, then host ports must either be undefined or they must match the container port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings
section of DescribeTasks responses.
If the essential
parameter of a container is marked as true
, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential
parameter of a container is marked as false
, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.
All tasks must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.
" }, + "restartPolicy":{ + "shape":"ContainerRestartPolicy", + "documentation":"The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container without needing to replace the task. For more information, see Restart individual containers in Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.
" + }, "entryPoint":{ "shape":"StringList", - "documentation":"Early versions of the Amazon ECS container agent don't properly handle entryPoint
parameters. If you have problems using entryPoint
, update your container agent or enter your commands and arguments as command
array items instead.
The entry point that's passed to the container. This parameter maps to Entrypoint
in the Create a container section of the Docker Remote API and the --entrypoint
option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.
Early versions of the Amazon ECS container agent don't properly handle entryPoint
parameters. If you have problems using entryPoint
, update your container agent or enter your commands and arguments as command
array items instead.
The entry point that's passed to the container. This parameter maps to Entrypoint
in tthe docker create-container command and the --entrypoint
option to docker run.
The command that's passed to the container. This parameter maps to Cmd
in the Create a container section of the Docker Remote API and the COMMAND
parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each argument is a separated string in the array.
The command that's passed to the container. This parameter maps to Cmd
in the docker create-container command and the COMMAND
parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.
The environment variables to pass to a container. This parameter maps to Env
in the Create a container section of the Docker Remote API and the --env
option to docker run.
We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.
The environment variables to pass to a container. This parameter maps to Env
in the docker create-container command and the --env
option to docker run.
We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.
A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file
option to docker run.
You can specify up to ten environment files. The file must have a .env
file extension. Each line in an environment file contains an environment variable in VARIABLE=VALUE
format. Lines beginning with #
are treated as comments and are ignored. For more information about the environment variable file syntax, see Declare default environment variables in file.
If there are environment variables specified using the environment
parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.
A list of files containing the environment variables to pass to a container. This parameter maps to the --env-file
option to docker run.
You can specify up to ten environment files. The file must have a .env
file extension. Each line in an environment file contains an environment variable in VARIABLE=VALUE
format. Lines beginning with #
are treated as comments and are ignored.
If there are environment variables specified using the environment
parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying Environment Variables in the Amazon Elastic Container Service Developer Guide.
The mount points for data volumes in your container.
This parameter maps to Volumes
in the Create a container section of the Docker Remote API and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as $env:ProgramData
. Windows containers can't mount directories on a different drive, and mount point can't be across drives.
The mount points for data volumes in your container.
This parameter maps to Volumes
in the the docker create-container command and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as $env:ProgramData
. Windows containers can't mount directories on a different drive, and mount point can't be across drives.
Data volumes to mount from another container. This parameter maps to VolumesFrom
in the Create a container section of the Docker Remote API and the --volumes-from
option to docker run.
Data volumes to mount from another container. This parameter maps to VolumesFrom
in tthe docker create-container command and the --volumes-from
option to docker run.
Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE
, SUCCESS
, or HEALTHY
status. If a startTimeout
value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED
state.
When the ECS_CONTAINER_START_TIMEOUT
container agent configuration variable is used, it's enforced independently from this start timeout value.
For tasks using the Fargate launch type, the task or service requires the following platforms:
Linux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
For tasks using the EC2 launch type, your container instances require at least version 1.26.0
of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1
of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The valid values are 2-120 seconds.
" + "documentation":"Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE
, SUCCESS
, or HEALTHY
status. If a startTimeout
value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED
state.
When the ECS_CONTAINER_START_TIMEOUT
container agent configuration variable is used, it's enforced independently from this start timeout value.
For tasks using the Fargate launch type, the task or service requires the following platforms:
Linux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
For tasks using the EC2 launch type, your container instances require at least version 1.26.0
of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1
of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The valid values for Fargate are 2-120 seconds.
" }, "stopTimeout":{ "shape":"BoxedInteger", @@ -1641,71 +1645,71 @@ }, "hostname":{ "shape":"String", - "documentation":"The hostname to use for your container. This parameter maps to Hostname
in the Create a container section of the Docker Remote API and the --hostname
option to docker run.
The hostname
parameter is not supported if you're using the awsvpc
network mode.
The hostname to use for your container. This parameter maps to Hostname
in thethe docker create-container command and the --hostname
option to docker run.
The hostname
parameter is not supported if you're using the awsvpc
network mode.
The user to use inside the container. This parameter maps to User
in the Create a container section of the Docker Remote API and the --user
option to docker run.
When running tasks using the host
network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.
You can specify the user
using the following formats. If specifying a UID or GID, you must specify it as a positive integer.
user
user:group
uid
uid:gid
user:gid
uid:group
This parameter is not supported for Windows containers.
The user to use inside the container. This parameter maps to User
in the docker create-container command and the --user
option to docker run.
When running tasks using the host
network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.
You can specify the user
using the following formats. If specifying a UID or GID, you must specify it as a positive integer.
user
user:group
uid
uid:gid
user:gid
uid:group
This parameter is not supported for Windows containers.
The working directory to run commands inside the container in. This parameter maps to WorkingDir
in the Create a container section of the Docker Remote API and the --workdir
option to docker run.
The working directory to run commands inside the container in. This parameter maps to WorkingDir
in the docker create-container command and the --workdir
option to docker run.
When this parameter is true, networking is off within the container. This parameter maps to NetworkDisabled
in the Create a container section of the Docker Remote API.
This parameter is not supported for Windows containers.
When this parameter is true, networking is off within the container. This parameter maps to NetworkDisabled
in the docker create-container command.
This parameter is not supported for Windows containers.
When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root
user). This parameter maps to Privileged
in the Create a container section of the Docker Remote API and the --privileged
option to docker run.
This parameter is not supported for Windows containers or tasks run on Fargate.
When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root
user). This parameter maps to Privileged
in the the docker create-container command and the --privileged
option to docker run
This parameter is not supported for Windows containers or tasks run on Fargate.
When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs
in the Create a container section of the Docker Remote API and the --read-only
option to docker run.
This parameter is not supported for Windows containers.
When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs
in the docker create-container command and the --read-only
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS servers that are presented to the container. This parameter maps to Dns
in the Create a container section of the Docker Remote API and the --dns
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS servers that are presented to the container. This parameter maps to Dns
in the the docker create-container command and the --dns
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch
in the Create a container section of the Docker Remote API and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch
in the docker create-container command and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
A list of hostnames and IP address mappings to append to the /etc/hosts
file on the container. This parameter maps to ExtraHosts
in the Create a container section of the Docker Remote API and the --add-host
option to docker run.
This parameter isn't supported for Windows containers or tasks that use the awsvpc
network mode.
A list of hostnames and IP address mappings to append to the /etc/hosts
file on the container. This parameter maps to ExtraHosts
in the docker create-container command and the --add-host
option to docker run.
This parameter isn't supported for Windows containers or tasks that use the awsvpc
network mode.
A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see Docker Run Security Configuration. This field isn't valid for containers in tasks using the Fargate launch type.
For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.
For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers in the Amazon Elastic Container Service Developer Guide.
This parameter maps to SecurityOpt
in the Create a container section of the Docker Remote API and the --security-opt
option to docker run.
The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
For more information about valid values, see Docker Run Security Configuration.
Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"
" + "documentation":"A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.
For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.
For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers in the Amazon Elastic Container Service Developer Guide.
This parameter maps to SecurityOpt
in the docker create-container command and the --security-opt
option to docker run.
The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"
" }, "interactive":{ "shape":"BoxedBoolean", - "documentation":"When this parameter is true
, you can deploy containerized applications that require stdin
or a tty
to be allocated. This parameter maps to OpenStdin
in the Create a container section of the Docker Remote API and the --interactive
option to docker run.
When this parameter is true
, you can deploy containerized applications that require stdin
or a tty
to be allocated. This parameter maps to OpenStdin
in the docker create-container command and the --interactive
option to docker run.
When this parameter is true
, a TTY is allocated. This parameter maps to Tty
in the Create a container section of the Docker Remote API and the --tty
option to docker run.
When this parameter is true
, a TTY is allocated. This parameter maps to Tty
in tthe docker create-container command and the --tty
option to docker run.
A key/value map of labels to add to the container. This parameter maps to Labels
in the Create a container section of the Docker Remote API and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
A key/value map of labels to add to the container. This parameter maps to Labels
in the docker create-container command and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
A list of ulimits
to set in the container. If a ulimit
value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits
in the Create a container section of the Docker Remote API and the --ulimit
option to docker run. Valid naming values are displayed in the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 1024
and the default hard limit is 65535
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
This parameter is not supported for Windows containers.
A list of ulimits
to set in the container. If a ulimit
value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits
in tthe docker create-container command and the --ulimit
option to docker run. Valid naming values are displayed in the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 65535
and the default hard limit is 65535
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
This parameter is not supported for Windows containers.
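A minimal boto3 sketch of the ulimits parameter described above; the family name, image, and limit values are assumptions for illustration, not part of this model:

import boto3

ecs = boto3.client("ecs")  # assumes AWS credentials and a region are already configured

# Raise the open-file limit for one container, per the nofile discussion above.
ecs.register_task_definition(
    family="example-ulimits",  # assumed name
    containerDefinitions=[
        {
            "name": "app",
            "image": "public.ecr.aws/docker/library/nginx:latest",  # assumed image
            "memory": 512,
            "ulimits": [
                {"name": "nofile", "softLimit": 65535, "hardLimit": 65535}
            ],
        }
    ],
)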
The log configuration specification for the container.
This parameter maps to LogConfig
in the Create a container section of the Docker Remote API and the --log-driver
option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
The log configuration specification for the container.
This parameter maps to LogConfig
in the docker create-container command and the --log-driver
option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options).
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck
in the Create a container section of the Docker Remote API and the HEALTHCHECK
parameter of docker run.
The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck
in the docker create-container command and the HEALTHCHECK
parameter of docker run.
A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls
in the Create a container section of the Docker Remote API and the --sysctl
option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time
setting to maintain longer lived connections.
A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls
in the docker create-container command and the --sysctl
option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time
setting to maintain longer lived connections.
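A hedged boto3 sketch of the systemControls parameter documented above; the family name, image, and the chosen keepalive value are assumptions:

import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="example-sysctls",  # assumed name
    containerDefinitions=[
        {
            "name": "app",
            "image": "public.ecr.aws/docker/library/nginx:latest",  # assumed image
            "memory": 512,
            "systemControls": [
                # Keep TCP connections alive longer, echoing the tcp_keepalive_time example above.
                {"namespace": "net.ipv4.tcp_keepalive_time", "value": "500"}
            ],
        }
    ],
)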
Specifies whether a restart policy is enabled for the container.
" + }, + "ignoredExitCodes":{ + "shape":"IntegerList", + "documentation":"A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum of 50 container exit codes. By default, Amazon ECS does not ignore any exit codes.
" + }, + "restartAttemptPeriod":{ + "shape":"BoxedInteger", + "documentation":"A period of time (in seconds) that the container must run for before a restart can be attempted. A container can be restarted only once every restartAttemptPeriod
seconds. If a container isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum restartAttemptPeriod
of 60 seconds and a maximum restartAttemptPeriod
of 1800 seconds. By default, a container must run for 300 seconds before it can be restarted.
You can enable a restart policy for each container defined in your task definition, to overcome transient failures faster and maintain task availability. When you enable a restart policy for a container, Amazon ECS can restart the container if it exits, without needing to replace the task. For more information, see Restart individual containers in Amazon ECS tasks with container restart policies in the Amazon Elastic Container Service Developer Guide.
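A minimal sketch of the container restart policy described above, assuming the members shown in this model (enabled, ignoredExitCodes, restartAttemptPeriod) sit under a container-level restartPolicy object; the family name, image, and command are illustrative:

import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="example-restart-policy",  # assumed name
    containerDefinitions=[
        {
            "name": "worker",
            "image": "public.ecr.aws/docker/library/busybox:latest",  # assumed image
            "memory": 256,
            "command": ["sh", "-c", "exec run-job"],  # assumed command
            "restartPolicy": {
                "enabled": True,
                "ignoredExitCodes": [0],      # do not restart on a clean exit
                "restartAttemptPeriod": 300,  # seconds; matches the documented default
            },
        }
    ],
)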
" + }, "ContainerStateChange":{ "type":"structure", "members":{ @@ -2921,15 +2944,15 @@ }, "driver":{ "shape":"String", - "documentation":"The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls
to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see Docker plugin discovery. This parameter maps to Driver
in the Create a volume section of the Docker Remote API and the xxdriver
option to docker volume create.
The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls
to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to Driver
in the docker create-container command and the --driver
option to docker volume create.
A map of Docker driver-specific options passed through. This parameter maps to DriverOpts
in the Create a volume section of the Docker Remote API and the xxopt
option to docker volume create.
A map of Docker driver-specific options passed through. This parameter maps to DriverOpts
in the docker create-volume command and the --opt
option to docker volume create.
Custom metadata to add to your Docker volume. This parameter maps to Labels
in the Create a volume section of the Docker Remote API and the xxlabel
option to docker volume create.
Custom metadata to add to your Docker volume. This parameter maps to Labels
in the docker create-container command and the --label
option to docker volume create.
This parameter is specified when you're using Docker volumes. Docker volumes are only supported when you're using the EC2 launch type. Windows containers only support the use of the local
driver. To use bind mounts, specify a host
instead.
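A short boto3 sketch of the Docker volume configuration described above (EC2 launch type assumed, since Docker volumes are not supported on Fargate; names and labels are illustrative):

import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="example-docker-volume",  # assumed name
    containerDefinitions=[
        {
            "name": "app",
            "image": "public.ecr.aws/docker/library/nginx:latest",  # assumed image
            "memory": 512,
            "mountPoints": [
                {"sourceVolume": "scratch", "containerPath": "/scratch"}
            ],
        }
    ],
    volumes=[
        {
            "name": "scratch",
            "dockerVolumeConfiguration": {
                "scope": "task",    # the volume lives and dies with the task
                "driver": "local",  # the only driver supported for Windows containers, per above
                "labels": {"purpose": "scratch-space"},  # assumed metadata
            },
        }
    ],
)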
A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD
to run the command arguments directly, or CMD-SHELL
to run the command with the container's default shell.
When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in double quotes and brackets.
[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]
You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.
CMD-SHELL, curl -f http://localhost/ || exit 1
An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck
in the Create a container section of the Docker Remote API.
A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD
to run the command arguments directly, or CMD-SHELL
to run the command with the container's default shell.
When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in double quotes and brackets.
[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]
You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.
CMD-SHELL, curl -f http://localhost/ || exit 1
An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see HealthCheck
in the docker create-container command.
The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the startPeriod
is off.
If a health check succeeds within the startPeriod
, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.
An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK
parameter of docker run.
The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.
You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.
The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.
Amazon ECS performs health checks on containers with the default that launched the container instance or the task.
The following describes the possible healthStatus
values for a container:
HEALTHY
-The container health check has passed successfully.
UNHEALTHY
-The container health check has failed.
UNKNOWN
-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.
The following describes the possible healthStatus
values based on the container health checker status of essential containers in the task with the following priority order (high to low):
UNHEALTHY
-One or more essential containers have failed their health check.
UNKNOWN
-Any essential container running within the task is in an UNKNOWN
state and no other essential containers have an UNHEALTHY
state.
HEALTHY
-All essential containers within the task have passed their health checks.
Consider the following task health example with 2 containers.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
, the task health is HEALTHY
.
Consider the following task health example with 3 containers.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, and Container3 is UNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, and Container3 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is HEALTHY
, and Container3 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, and Container3 is HEALTHY
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, and Container3 is UNKNOWN
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
, and Container3 is HEALTHY
, the task health is HEALTHY
.
If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.
The following are notes about container health check support:
If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't cause a container to transition to an UNHEALTHY
status. This is by design, to ensure that containers remain running during agent restarts or temporary unavailability. The health check status is the \"last heard from\" response from the Amazon ECS agent, so if the container was considered HEALTHY
prior to the disconnect, that status will remain until the agent reconnects and another health check occurs. There are no assumptions made about the status of the container health checks.
Container health checks require version 1.17.0
or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.
Container health checks are supported for Fargate tasks if you're using platform version 1.1.0
or greater. For more information, see Fargate platform versions.
Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.
An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK
parameter of docker run.
The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.
You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.
The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.
Amazon ECS performs health checks on containers with the default that launched the container instance or the task.
The following describes the possible healthStatus
values for a container:
HEALTHY
-The container health check has passed successfully.
UNHEALTHY
-The container health check has failed.
UNKNOWN
-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.
The following describes the possible healthStatus
values based on the container health checker status of essential containers in the task with the following priority order (high to low):
UNHEALTHY
-One or more essential containers have failed their health check.
UNKNOWN
-Any essential container running within the task is in an UNKNOWN
state and no other essential containers have an UNHEALTHY
state.
HEALTHY
-All essential containers within the task have passed their health checks.
Consider the following task health example with 2 containers.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
, the task health is HEALTHY
.
Consider the following task health example with 3 containers.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, and Container3 is UNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, and Container3 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is HEALTHY
, and Container3 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, and Container3 is HEALTHY
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, and Container3 is UNKNOWN
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
, and Container3 is HEALTHY
, the task health is HEALTHY
.
If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.
The following are notes about container health check support:
If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't cause a container to transition to an UNHEALTHY
status. This is by design, to ensure that containers remain running during agent restarts or temporary unavailability. The health check status is the \"last heard from\" response from the Amazon ECS agent, so if the container was considered HEALTHY
prior to the disconnect, that status will remain until the agent reconnects and another health check occurs. There are no assumptions made about the status of the container health checks.
Container health checks require version 1.17.0
or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.
Container health checks are supported for Fargate tasks if you're using platform version 1.1.0
or greater. For more information, see Fargate platform versions.
Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.
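A minimal sketch of the health check parameters documented above, reusing the CMD-SHELL curl example from the text; the family name, image, and timing values are assumptions:

import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="example-healthcheck",  # assumed name
    containerDefinitions=[
        {
            "name": "web",
            "image": "public.ecr.aws/docker/library/nginx:latest",  # assumed image
            "memory": 512,
            "healthCheck": {
                "command": ["CMD-SHELL", "curl -f http://localhost/ || exit 1"],
                "interval": 30,     # seconds between checks
                "timeout": 5,       # seconds before a single check counts as failed
                "retries": 3,       # consecutive failures before UNHEALTHY
                "startPeriod": 60,  # optional bootstrap grace period, per the docs above
            },
        }
    ],
)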
The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd
in the Create a container section of the Docker Remote API and the --cap-add
option to docker run.
Tasks launched on Fargate only support adding the SYS_PTRACE
kernel capability.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd
in the docker create-container command and the --cap-add
option to docker run.
Tasks launched on Fargate only support adding the SYS_PTRACE
kernel capability.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop
in the Create a container section of the Docker Remote API and the --cap-drop
option to docker run.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop
in the docker create-container command and the --cap-drop
option to docker run.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more information about the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information about these Linux capabilities, see the capabilities(7) Linux manual page.
" + "documentation":"The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more detailed information about these Linux capabilities, see the capabilities(7) Linux manual page.
" }, "KeyValuePair":{ "type":"structure", @@ -3514,7 +3541,7 @@ }, "devices":{ "shape":"DevicesList", - "documentation":"Any host devices to expose to the container. This parameter maps to Devices
in the Create a container section of the Docker Remote API and the --device
option to docker run.
If you're using tasks that use the Fargate launch type, the devices
parameter isn't supported.
Any host devices to expose to the container. This parameter maps to Devices
in the docker create-container command and the --device
option to docker run.
If you're using tasks that use the Fargate launch type, the devices
parameter isn't supported.
The value for the size (in MiB) of the /dev/shm
volume. This parameter maps to the --shm-size
option to docker run.
If you are using tasks that use the Fargate launch type, the sharedMemorySize
parameter is not supported.
The value for the size (in MiB) of the /dev/shm
volume. This parameter maps to the --shm-size
option to docker run.
If you are using tasks that use the Fargate launch type, the sharedMemorySize
parameter is not supported.
The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs
option to docker run.
If you're using tasks that use the Fargate launch type, the tmpfs
parameter isn't supported.
The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the --tmpfs
option to docker run.
If you're using tasks that use the Fargate launch type, the tmpfs
parameter isn't supported.
This allows you to tune a container's memory swappiness behavior. A swappiness
value of 0
will cause swapping to not happen unless absolutely necessary. A swappiness
value of 100
will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0
and 100
. If the swappiness
parameter is not specified, a default value of 60
is used. If a value is not specified for maxSwap
then this parameter is ignored. This parameter maps to the --memory-swappiness
option to docker run.
If you're using tasks that use the Fargate launch type, the swappiness
parameter isn't supported.
If you're using tasks on Amazon Linux 2023 the swappiness
parameter isn't supported.
This allows you to tune a container's memory swappiness behavior. A swappiness
value of 0
will cause swapping to not happen unless absolutely necessary. A swappiness
value of 100
will cause pages to be swapped very aggressively. Accepted values are whole numbers between 0
and 100
. If the swappiness
parameter is not specified, a default value of 60
is used. If a value is not specified for maxSwap
then this parameter is ignored. This parameter maps to the --memory-swappiness
option to docker run.
If you're using tasks that use the Fargate launch type, the swappiness
parameter isn't supported.
If you're using tasks on Amazon Linux 2023 the swappiness
parameter isn't supported.
The Linux-specific options that are applied to the container, such as Linux KernelCapabilities.
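A hedged sketch of the Linux-specific options above, for a task on the EC2 launch type (devices, tmpfs, and swappiness are not supported on Fargate per the docs); names, sizes, and capability choices are illustrative:

import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="example-linux-parameters",  # assumed name
    containerDefinitions=[
        {
            "name": "app",
            "image": "public.ecr.aws/docker/library/debian:stable",  # assumed image
            "memory": 512,
            "linuxParameters": {
                "capabilities": {"add": ["SYS_PTRACE"], "drop": ["NET_RAW"]},
                "sharedMemorySize": 256,  # MiB for /dev/shm, see --shm-size above
                "tmpfs": [
                    {"containerPath": "/tmp", "size": 128, "mountOptions": ["rw"]}
                ],
            },
        }
    ],
)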
" @@ -3934,7 +3961,7 @@ "members":{ "logDriver":{ "shape":"LogDriver", - "documentation":"The log driver to use for the container.
For tasks on Fargate, the supported log drivers are awslogs
, splunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs
, fluentd
, gelf
, json-file
, journald
, logentries
,syslog
, splunk
, and awsfirelens
.
For more information about using the awslogs
log driver, see Using the awslogs log driver in the Amazon Elastic Container Service Developer Guide.
For more information about using the awsfirelens
log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.
If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.
The log driver to use for the container.
For tasks on Fargate, the supported log drivers are awslogs
, splunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs
, fluentd
, gelf
, json-file
, journald
, syslog
, splunk
, and awsfirelens
.
For more information about using the awslogs
log driver, see Send Amazon ECS logs to CloudWatch in the Amazon Elastic Container Service Developer Guide.
For more information about using the awsfirelens
log driver, see Send Amazon ECS logs to an Amazon Web Services service or Amazon Web Services Partner.
If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.
The secrets to pass to the log configuration. For more information, see Specifying sensitive data in the Amazon Elastic Container Service Developer Guide.
" } }, - "documentation":"The log configuration for the container. This parameter maps to LogConfig
in the Create a container section of the Docker Remote API and the --log-driver
option to docker run
.
By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.
Understand the following when specifying a log configuration for your containers.
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.
For tasks on Fargate, the supported log drivers are awslogs
, splunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs
, fluentd
, gelf
, json-file
, journald
, logentries
,syslog
, splunk
, and awsfirelens
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.
For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.
The log configuration for the container. This parameter maps to LogConfig
in the docker create-container command and the --log-driver
option to docker run.
By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.
Understand the following when specifying a log configuration for your containers.
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.
For tasks on Fargate, the supported log drivers are awslogs
, splunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs
, fluentd
, gelf
, json-file
, journald
, syslog
, splunk
, and awsfirelens
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.
For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.
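A minimal sketch of the awslogs log driver described above; the log group, region, and stream prefix are assumptions, and the log group is expected to exist or be created separately:

import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="example-awslogs",  # assumed name
    containerDefinitions=[
        {
            "name": "app",
            "image": "public.ecr.aws/docker/library/nginx:latest",  # assumed image
            "memory": 512,
            "logConfiguration": {
                "logDriver": "awslogs",
                "options": {
                    "awslogs-group": "/ecs/example-awslogs",  # assumed log group
                    "awslogs-region": "us-east-1",            # assumed region
                    "awslogs-stream-prefix": "app",
                },
            },
        }
    ],
)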
The port number range on the container that's bound to the dynamically mapped host port range.
The following rules apply when you specify a containerPortRange
:
You must use either the bridge
network mode or the awsvpc
network mode.
This parameter is available for both the EC2 and Fargate launch types.
This parameter is available for both the Linux and Windows operating systems.
The container instance must have at least version 1.67.0 of the container agent and at least version 1.67.0-1 of the ecs-init
package
You can specify a maximum of 100 port ranges per container.
You do not specify a hostPortRange
. The value of the hostPortRange
is set as follows:
For containers in a task with the awsvpc
network mode, the hostPortRange
is set to the same value as the containerPortRange
. This is a static mapping strategy.
For containers in a task with the bridge
network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.
The containerPortRange
valid values are between 1 and 65535.
A port can only be included in one port mapping per container.
You cannot specify overlapping port ranges.
The first port in the range must be less than last port in the range.
Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.
For more information, see Issue #11185 on the Github website.
For information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.
You can call DescribeTasks
to view the hostPortRange
which are the host ports that are bound to the container ports.
Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.
If you use containers in a task with the awsvpc
or host
network mode, specify the exposed ports using containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
Most fields of this parameter (containerPort
, hostPort
, protocol
) maps to PortBindings
in the Create a container section of the Docker Remote API and the --publish
option to docker run
. If the network mode of a task definition is set to host
, host ports must either be undefined or match the container port in the port mapping.
You can't expose the same container port for multiple protocols. If you attempt this, an error is returned.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the networkBindings
section of DescribeTasks API responses.
Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.
If you use containers in a task with the awsvpc
or host
network mode, specify the exposed ports using containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
Most fields of this parameter (containerPort
, hostPort
, protocol
) maps to PortBindings
in the docker create-container command and the --publish
option to docker run
. If the network mode of a task definition is set to host
, host ports must either be undefined or match the container port in the port mapping.
You can't expose the same container port for multiple protocols. If you attempt this, an error is returned.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the networkBindings
section of DescribeTasks API responses.
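A sketch of a containerPortRange mapping following the rules above (bridge network mode, host ports left to the agent); the family name, image, and the exact range are assumptions, and the container instance is assumed to meet the documented agent and ecs-init versions:

import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="example-port-range",  # assumed name
    networkMode="bridge",
    containerDefinitions=[
        {
            "name": "app",
            "image": "public.ecr.aws/docker/library/nginx:latest",  # assumed image
            "memory": 512,
            "portMappings": [
                # hostPortRange is omitted; the agent picks open host ports dynamically.
                {"containerPortRange": "8000-8010", "protocol": "tcp"}
            ],
        }
    ],
)
# Once a task is RUNNING, describe_tasks exposes the bound host ports in networkBindings.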
The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
" }, "networkMode":{ "shape":"NetworkMode", - "documentation":"The Docker networking mode to use for the containers in the task. The valid values are none
, bridge
, awsvpc
, and host
. If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default>
or awsvpc
can be used. If the network mode is set to none
, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host
and awsvpc
network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container ports are mapped directly to the corresponding host port (for the host
network mode) or the attached elastic network interface port (for the awsvpc
network mode), so you cannot take advantage of dynamic host port mappings.
When using the host
network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.
For more information, see Network settings in the Docker run reference.
" + "documentation":"The Docker networking mode to use for the containers in the task. The valid values are none
, bridge
, awsvpc
, and host
. If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default>
or awsvpc
can be used. If the network mode is set to none
, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host
and awsvpc
network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container ports are mapped directly to the corresponding host port (for the host
network mode) or the attached elastic network interface port (for the awsvpc
network mode), so you cannot take advantage of dynamic host port mappings.
When using the host
network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.
The process namespace to use for the containers in the task. The valid values are host
or task
. On Fargate for Linux containers, the only valid value is task
. For example, monitoring sidecars might need pidMode
to access information about other containers running in the same task.
If host
is specified, all containers within the tasks that specified the host
PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.
If task
is specified, all containers within the specified task share the same process namespace.
If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference.
If the host
PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security.
This parameter is not supported for Windows containers.
This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0
or later (Linux). This isn't supported for Windows containers on Fargate.
The process namespace to use for the containers in the task. The valid values are host
or task
. On Fargate for Linux containers, the only valid value is task
. For example, monitoring sidecars might need pidMode
to access information about other containers running in the same task.
If host
is specified, all containers within the tasks that specified the host
PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.
If task
is specified, all containers within the specified task share the same process namespace.
If no value is specified, the default is a private namespace for each container.
If the host
PID mode is used, there's a heightened risk of undesired process namespace exposure.
This parameter is not supported for Windows containers.
This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0
or later (Linux). This isn't supported for Windows containers on Fargate.
The IPC resource namespace to use for the containers in the task. The valid values are host
, task
, or none
. If host
is specified, then all containers within the tasks that specified the host
IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task
is specified, all containers within the specified task share the same IPC resources. If none
is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.
If the host
IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see Docker security.
If you are setting namespaced kernel parameters using systemControls
for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related systemControls
will apply to all containers within a task.
This parameter is not supported for Windows containers or tasks run on Fargate.
The IPC resource namespace to use for the containers in the task. The valid values are host
, task
, or none
. If host
is specified, then all containers within the tasks that specified the host
IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task
is specified, all containers within the specified task share the same IPC resources. If none
is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.
If the host
IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose.
If you are setting namespaced kernel parameters using systemControls
for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related systemControls
will apply to all containers within a task.
This parameter is not supported for Windows containers or tasks run on Fargate.
An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 128 letters (uppercase and lowercase), numbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter contains the deployment ID of the service that starts it.
An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 128 letters (uppercase and lowercase), numbers, hyphens (-), forward slashes (/), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter contains the deployment ID of the service that starts it.
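A small sketch of the startedBy flow described above: tag tasks when they are started, then filter on the same value with ListTasks. The cluster, task definition, and job identifier are assumptions:

import boto3

ecs = boto3.client("ecs")
job_id = "batch-job-20240813"  # assumed identifier, within the documented length limit

ecs.run_task(
    cluster="example-cluster",         # assumed cluster
    taskDefinition="example-batch:1",  # assumed task definition
    count=1,
    startedBy=job_id,
)

tasks = ecs.list_tasks(cluster="example-cluster", startedBy=job_id)
print(tasks["taskArns"])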
A full description of the tasks that were run. The tasks that were successfully placed on your cluster are described here.
" + "documentation":"A full description of the tasks that were run. The tasks that were successfully placed on your cluster are described here.
" }, "failures":{ "shape":"Failures", @@ -5470,7 +5497,7 @@ }, "startedBy":{ "shape":"String", - "documentation":"An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 36 letters (uppercase and lowercase), numbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, the startedBy
parameter contains the deployment ID of the service that starts it.
An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 36 letters (uppercase and lowercase), numbers, hyphens (-), forward slashes (/), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, the startedBy
parameter contains the deployment ID of the service that starts it.
The namespaced kernel parameter to set a value
for.
Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\"
, and Sysctls
that start with \"fs.mqueue.*\"
Valid network namespace values: Sysctls
that start with \"net.*\"
All of these values are supported by Fargate.
" } }, - "documentation":"A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls
in the Create a container section of the Docker Remote API and the --sysctl
option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time
setting to maintain longer lived connections.
We don't recommend that you specify network-related systemControls
parameters for multiple containers in a single task that also uses either the awsvpc
or host
network mode. Doing this has the following disadvantages:
For tasks that use the awsvpc
network mode including Fargate, if you set systemControls
for any container, it applies to all containers in the task. If you set different systemControls
for multiple containers in a single task, the container that's started last determines which systemControls
take effect.
For tasks that use the host
network mode, the network namespace systemControls
aren't supported.
If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see IPC mode.
For tasks that use the host
IPC mode, IPC namespace systemControls
aren't supported.
For tasks that use the task
IPC mode, IPC namespace systemControls
values apply to all containers within a task.
This parameter is not supported for Windows containers.
This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0
or later (Linux). This isn't supported for Windows containers on Fargate.
A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls
in the docker create-container command and the --sysctl
option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time
setting to maintain longer lived connections.
We don't recommend that you specify network-related systemControls
parameters for multiple containers in a single task that also uses either the awsvpc
or host
network mode. Doing this has the following disadvantages:
For tasks that use the awsvpc
network mode including Fargate, if you set systemControls
for any container, it applies to all containers in the task. If you set different systemControls
for multiple containers in a single task, the container that's started last determines which systemControls
take effect.
For tasks that use the host
network mode, the network namespace systemControls
aren't supported.
If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see IPC mode.
For tasks that use the host
IPC mode, IPC namespace systemControls
aren't supported.
For tasks that use the task
IPC mode, IPC namespace systemControls
values apply to all containers within a task.
This parameter is not supported for Windows containers.
This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0
or later (Linux). This isn't supported for Windows containers on Fargate.
The specified target wasn't found. You can view your available container instances with ListContainerInstances. Amazon ECS container instances are cluster-specific and Region-specific.
", + "documentation":"The specified target wasn't found. You can view your available container instances with ListContainerInstances. Amazon ECS container instances are cluster-specific and Region-specific.
", "exception":true }, "TargetType":{ @@ -5928,15 +5955,15 @@ }, "taskRoleArn":{ "shape":"String", - "documentation":"The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the task permission to call Amazon Web Services APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.
IAM roles for tasks on Windows require that the -EnableTaskIAMRole
option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see Windows IAM roles for tasks in the Amazon Elastic Container Service Developer Guide.
The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the task permission to call Amazon Web Services APIs on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
" }, "executionRoleArn":{ "shape":"String", - "documentation":"The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. The task execution IAM role is required depending on the requirements of your task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic Container Service Developer Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make Amazon Web Services API calls on your behalf. For informationabout the required IAM roles for Amazon ECS, see IAM roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
" }, "networkMode":{ "shape":"NetworkMode", - "documentation":"The Docker networking mode to use for the containers in the task. The valid values are none
, bridge
, awsvpc
, and host
. If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default>
or awsvpc
can be used. If the network mode is set to none
, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host
and awsvpc
network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container ports are mapped directly to the corresponding host port (for the host
network mode) or the attached elastic network interface port (for the awsvpc
network mode), so you cannot take advantage of dynamic host port mappings.
When using the host
network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.
For more information, see Network settings in the Docker run reference.
" + "documentation":"The Docker networking mode to use for the containers in the task. The valid values are none
, bridge
, awsvpc
, and host
. If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, <default>
or awsvpc
can be used. If the network mode is set to none
, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host
and awsvpc
network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container ports are mapped directly to the corresponding host port (for the host
network mode) or the attached elastic network interface port (for the awsvpc
network mode), so you cannot take advantage of dynamic host port mappings.
When using the host
network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.
The number of cpu
units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the memory
parameter.
The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate.
256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or later.
16384 (16 vCPU) - Available memory
values: 32 GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or later.
The number of cpu
units used by the task. If you use the EC2 launch type, this field is optional. Any value can be used. If you use the Fargate launch type, this field is required. You must use one of the following values. The value that you choose determines your range of valid values for the memory
parameter.
If you use the EC2 launch type, this field is optional. Supported values are between 128
CPU units (0.125
vCPUs) and 10240
CPU units (10
vCPUs).
The CPU units cannot be less than 1 vCPU when you use Windows containers on Fargate.
256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or later.
16384 (16 vCPU) - Available memory
values: 32 GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or later.
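The CPU-to-memory table above is easy to encode as a lookup for pre-flight validation. The helper below is a hypothetical convenience, not part of botocore; the MiB equivalents for the GB ranges (for example, 61440 MiB for 60 GB) are assumptions derived from the documented values.

```python
# Valid Fargate memory (MiB) values for each documented CPU setting.
FARGATE_MEMORY_MIB = {
    256: {512, 1024, 2048},
    512: set(range(1024, 4096 + 1, 1024)),
    1024: set(range(2048, 8192 + 1, 1024)),
    2048: set(range(4096, 16384 + 1, 1024)),
    4096: set(range(8192, 30720 + 1, 1024)),
    8192: set(range(16384, 61440 + 1, 4096)),    # 16-60 GB in 4 GB steps
    16384: set(range(32768, 122880 + 1, 8192)),  # 32-120 GB in 8 GB steps
}


def is_valid_fargate_pair(cpu_units: int, memory_mib: int) -> bool:
    """Return True if the cpu/memory pair matches the documented Fargate table."""
    return memory_mib in FARGATE_MEMORY_MIB.get(cpu_units, set())


print(is_valid_fargate_pair(1024, 3072))  # True
print(is_valid_fargate_pair(256, 3072))   # False
```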
The process namespace to use for the containers in the task. The valid values are host
or task
. On Fargate for Linux containers, the only valid value is task
. For example, monitoring sidecars might need pidMode
to access information about other containers running in the same task.
If host
is specified, all containers within the tasks that specified the host
PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.
If task
is specified, all containers within the specified task share the same process namespace.
If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference.
If the host
PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security.
This parameter is not supported for Windows containers.
This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0
or later (Linux). This isn't supported for Windows containers on Fargate.
The process namespace to use for the containers in the task. The valid values are host
or task
. On Fargate for Linux containers, the only valid value is task
. For example, monitoring sidecars might need pidMode
to access information about other containers running in the same task.
If host
is specified, all containers within the tasks that specified the host
PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.
If task
is specified, all containers within the specified task share the same process namespace.
If no value is specified, the default is a private namespace for each container.
If the host
PID mode is used, there's a heightened risk of undesired process namespace exposure.
This parameter is not supported for Windows containers.
This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0
or later (Linux). This isn't supported for Windows containers on Fargate.
The IPC resource namespace to use for the containers in the task. The valid values are host
, task
, or none
. If host
is specified, then all containers within the tasks that specified the host
IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task
is specified, all containers within the specified task share the same IPC resources. If none
is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.
If the host
IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure. For more information, see Docker security.
If you are setting namespaced kernel parameters using systemControls
for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related systemControls
will apply to all containers within a task.
This parameter is not supported for Windows containers or tasks run on Fargate.
The IPC resource namespace to use for the containers in the task. The valid values are host
, task
, or none
. If host
is specified, then all containers within the tasks that specified the host
IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task
is specified, all containers within the specified task share the same IPC resources. If none
is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.
If the host
IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure.
If you are setting namespaced kernel parameters using systemControls
for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related systemControls
will apply to all containers within a task.
This parameter is not supported for Windows containers or tasks run on Fargate.
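A brief sketch of where these task-level settings sit on RegisterTaskDefinition; the family, images, and the particular values chosen (pidMode of task, ipcMode of none) are illustrative only.

```python
import botocore.session

ecs = botocore.session.get_session().create_client("ecs", region_name="us-east-1")

# pidMode and ipcMode are task-level settings on RegisterTaskDefinition.
# "task" shares the process namespace between containers of one task;
# "none" keeps IPC resources private to each container.
ecs.register_task_definition(
    family="sidecar-monitoring-demo",
    requiresCompatibilities=["EC2"],
    pidMode="task",
    ipcMode="none",
    containerDefinitions=[
        {"name": "app", "image": "my-app:latest", "memory": 512},      # placeholder
        {"name": "monitor", "image": "my-monitor:latest", "memory": 256},
    ],
)
```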
The hard limit for the ulimit
type.
The ulimit
settings to pass to the container.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter, which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 1024
and the default hard limit is 65535
.
You can specify the ulimit
settings for a container in a task definition.
The ulimit
settings to pass to the container.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter, which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 65535
and the default hard limit is 65535
.
You can specify the ulimit
settings for a container in a task definition.
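To show the ulimit override described above in context, here is a minimal registration sketch that raises a container's nofile limits; the family name, image, and limit values are placeholders.

```python
import botocore.session

ecs = botocore.session.get_session().create_client("ecs", region_name="us-east-1")

# Override the nofile limit for a container; on Fargate this replaces
# the defaults described above.
ecs.register_task_definition(
    family="nofile-demo",
    requiresCompatibilities=["FARGATE"],
    networkMode="awsvpc",
    cpu="256",
    memory="512",
    containerDefinitions=[
        {
            "name": "app",
            "image": "public.ecr.aws/nginx/nginx:latest",  # placeholder image
            "ulimits": [
                {"name": "nofile", "softLimit": 4096, "hardLimit": 65535}
            ],
        }
    ],
)
```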
Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).
The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.
If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built into Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.
When you create the IAM OIDC provider, you specify the following:
The URL of the OIDC identity provider (IdP) to trust
A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider
A list of tags that are attached to the specified IAM OIDC provider
A list of thumbprints of one or more server certificates that the IdP uses
You get all of this information from the OIDC IdP you want to use to access Amazon Web Services.
Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub, GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint.
The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.
Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).
The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.
If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built into Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.
When you create the IAM OIDC provider, you specify the following:
The URL of the OIDC identity provider (IdP) to trust
A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider
A list of tags that are attached to the specified IAM OIDC provider
A list of thumbprints of one or more server certificates that the IdP uses
You get all of this information from the OIDC IdP you want to use to access Amazon Web Services.
Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of these trusted CAs, only then do we secure communication using the thumbprints set in the IdP's configuration.
The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.
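A minimal sketch of the call described above; the provider URL, audience, thumbprint, and tag are placeholders that would come from your own OIDC IdP.

```python
import botocore.session

iam = botocore.session.get_session().create_client("iam")

# The URL, client ID (audience), and thumbprint below are placeholders;
# they come from the OIDC IdP you want to trust, not from this model.
resp = iam.create_open_id_connect_provider(
    Url="https://oidc.example.com",
    ClientIDList=["sts.amazonaws.com"],
    ThumbprintList=["0123456789abcdef0123456789abcdef01234567"],
    Tags=[{"Key": "team", "Value": "platform"}],
)
print(resp["OpenIDConnectProviderArn"])
```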
Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the Amazon Web Services Sign-In User Guide.
" + "documentation":"Lists the account alias associated with the Amazon Web Services account (Note: you can have only one). For information about using an Amazon Web Services account alias, see Creating, deleting, and listing an Amazon Web Services account alias in the IAM User Guide.
" }, "ListAttachedGroupPolicies":{ "name":"ListAttachedGroupPolicies", @@ -2426,7 +2426,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.
The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)
Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.
Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted root certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation. These OIDC IdPs include Auth0, GitHub, GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint.
Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint
operation to highly privileged users.
Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.
The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)
Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.
Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of these trusted CAs, only then do we secure communication using the thumbprints set in the IdP's configuration.
Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint
operation to highly privileged users.
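A short sketch of the thumbprint replacement; the provider ARN and thumbprint are placeholders, and the list passed here replaces, rather than merges with, the existing one.

```python
import botocore.session

iam = botocore.session.get_session().create_client("iam")

# The new list completely replaces the existing thumbprints (no merge).
iam.update_open_id_connect_provider_thumbprint(
    OpenIDConnectProviderArn="arn:aws:iam::123456789012:oidc-provider/oidc.example.com",
    ThumbprintList=["89abcdef0123456789abcdef0123456789abcdef"],
)
```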
This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to AbortMultipartUpload
:
This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultupartUpload
operation to abort all the in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to AbortMultipartUpload
:
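The abort-then-verify pattern recommended above might look like this; the bucket, key, and upload ID are placeholders, and a NoSuchUpload error from ListParts is treated as confirmation that nothing remains.

```python
import botocore.session
from botocore.exceptions import ClientError

s3 = botocore.session.get_session().create_client("s3", region_name="us-east-1")

bucket, key, upload_id = "example-bucket", "big-file.bin", "EXAMPLE-UPLOAD-ID"

s3.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)

# Verify that no parts remain (and therefore no storage is billed);
# a NoSuchUpload error also means the upload is fully gone.
try:
    parts = s3.list_parts(Bucket=bucket, Key=key, UploadId=upload_id)
    print("parts remaining:", len(parts.get("Parts", [])))
except ClientError as err:
    if err.response["Error"]["Code"] == "NoSuchUpload":
        print("upload fully aborted")
    else:
        raise
```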
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
All CopyObject
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject
operation.
If the source object is in a general purpose bucket, you must have s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject
permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key can't be set to ReadOnly
on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length
. You always need to read the entire response body to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK
response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK
response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK
status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to CopyObject
:
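A basic single-request copy (for objects of 5 GB or less) could look like the sketch below; bucket and key names are placeholders. As the documentation notes, the SDK parses errors embedded in a 200 OK response, so the caller sees them as exceptions rather than having to inspect the body.

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3", region_name="us-east-1")

# Copy within or across general purpose buckets; names are placeholders.
resp = s3.copy_object(
    Bucket="destination-bucket",
    Key="reports/copy-of-report.csv",
    CopySource={"Bucket": "source-bucket", "Key": "reports/report.csv"},
)
print(resp["CopyObjectResult"]["ETag"])
```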
You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not Found
code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.
Directory buckets - You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
All HeadBucket
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory bucket - You must use IAM credentials to authenticate and authorize your access to the HeadBucket
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
General purpose bucket permissions - To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateSession
permission in the Action
element of a policy. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the bucket.
For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not Found
code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.
General purpose buckets - Requests to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.
General purpose bucket permissions - To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateSession
permission in the Action
element of a policy. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the bucket.
For more information about example bucket policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
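Because HeadBucket returns only a status code on failure, a small wrapper that maps the exception back to that code is often all that is needed; the bucket name is a placeholder.

```python
import botocore.session
from botocore.exceptions import ClientError

s3 = botocore.session.get_session().create_client("s3", region_name="us-east-1")


def bucket_is_accessible(bucket_name: str) -> bool:
    """Return True when HeadBucket answers 200 OK for the caller."""
    try:
        s3.head_bucket(Bucket=bucket_name)
        return True
    except ClientError as err:
        # Only the HTTP status is meaningful here: 400, 403, or 404.
        status = err.response["ResponseMetadata"]["HTTPStatusCode"]
        print(f"HeadBucket on {bucket_name!r} returned HTTP {status}")
        return False


print(bucket_is_accessible("example-bucket"))
```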
The HEAD
operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. It's not possible to retrieve the exact exception of these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - To use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a HEAD
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. It's because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
Directory buckets - Delete marker is not supported by directory buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following actions are related to HeadObject
:
The HEAD
operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. It's not possible to retrieve the exact exception of these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
General purpose bucket permissions - To use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a HEAD
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. It's because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
Directory buckets - Delete marker is not supported by directory buckets.
Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The following actions are related to HeadObject
:
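A minimal HeadObject sketch that distinguishes the 404/403 behavior described above; the bucket and key are placeholders, and the error-code strings checked here reflect how botocore surfaces bodiless HEAD errors.

```python
import botocore.session
from botocore.exceptions import ClientError

s3 = botocore.session.get_session().create_client("s3", region_name="us-east-1")

try:
    head = s3.head_object(Bucket="example-bucket", Key="reports/report.csv")
    print(head["ContentLength"], head["LastModified"], head.get("ETag"))
except ClientError as err:
    code = err.response["Error"]["Code"]
    # With s3:ListBucket you get 404 for a missing key; without it, 403.
    if code in ("404", "NoSuchKey"):
        print("object does not exist")
    elif code == "403":
        print("access denied (or missing s3:ListBucket for a 404)")
    else:
        raise
```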
This operation is not supported by directory buckets.
Returns a list of all buckets owned by the authenticated sender of the request. To use this operation, you must have the s3:ListAllMyBuckets
permission.
For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.
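A hedged sketch of paging through ListBuckets; the MaxBuckets and ContinuationToken names are assumed from the updated S3 model shipped in this release, and older clients would instead call list_buckets() with no arguments.

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# Page through the account's buckets. MaxBuckets and ContinuationToken
# are the pagination fields assumed from the updated ListBuckets model.
kwargs = {"MaxBuckets": 100}
while True:
    page = s3.list_buckets(**kwargs)
    for bucket in page.get("Buckets", []):
        print(bucket["Name"], bucket["CreationDate"])
    token = page.get("ContinuationToken")
    if not token:
        break
    kwargs["ContinuationToken"] = token
```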
", @@ -858,7 +859,7 @@ "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", - "documentation":"This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload
request, but has not yet been completed or aborted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.
The ListMultipartUploads
operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads
request, the response returns an IsTruncated
element with the value of true
, a NextKeyMarker
element, and a NextUploadIdMarker
element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
requests. In these requests, include two query parameters: key-marker
and upload-id-marker
. Set the value of key-marker
to the NextKeyMarker
value from the previous response. Similarly, set the value of upload-id-marker
to the NextUploadIdMarker
value from the previous response.
Directory buckets - The upload-id-marker
element and the NextUploadIdMarker
element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker
to the NextKeyMarker
value from the previous response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - In the ListMultipartUploads
response, the multipart uploads are sorted based on two criteria:
Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.
Time-based sorting - Uploads that share the same object key are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.
Directory bucket - In the ListMultipartUploads
response, the multipart uploads aren't sorted lexicographically based on the object keys.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to ListMultipartUploads
:
This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload
request, but has not yet been completed or aborted.
Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads
operation to list the in-progress multipart uploads in the bucket and use the AbortMultupartUpload
operation to abort all the in-progress multipart uploads.
The ListMultipartUploads
operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads
request, the response returns an IsTruncated
element with the value of true
, a NextKeyMarker
element, and a NextUploadIdMarker
element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
requests. In these requests, include two query parameters: key-marker
and upload-id-marker
. Set the value of key-marker
to the NextKeyMarker
value from the previous response. Similarly, set the value of upload-id-marker
to the NextUploadIdMarker
value from the previous response.
Directory buckets - The upload-id-marker
element and the NextUploadIdMarker
element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker
to the NextKeyMarker
value from the previous response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - In the ListMultipartUploads
response, the multipart uploads are sorted based on two criteria:
Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.
Time-based sorting - Uploads that share the same object key are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.
Directory bucket - In the ListMultipartUploads
response, the multipart uploads aren't sorted lexicographically based on the object keys.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to ListMultipartUploads
:
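The KeyMarker/UploadIdMarker continuation described above, combined with AbortMultipartUpload, is the usual way to clear a bucket of in-progress uploads; the bucket name is a placeholder.

```python
import botocore.session

s3 = botocore.session.get_session().create_client("s3", region_name="us-east-1")
bucket = "example-bucket"  # placeholder

# Walk all in-progress uploads using the KeyMarker/UploadIdMarker
# continuation, aborting each one (for example, before deleting a bucket).
kwargs = {"Bucket": bucket}
while True:
    page = s3.list_multipart_uploads(**kwargs)
    for upload in page.get("Uploads", []):
        s3.abort_multipart_upload(
            Bucket=bucket, Key=upload["Key"], UploadId=upload["UploadId"]
        )
    if not page.get("IsTruncated"):
        break
    kwargs["KeyMarker"] = page.get("NextKeyMarker")
    kwargs["UploadIdMarker"] = page.get("NextUploadIdMarker")
```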
Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - For general purpose buckets, ListObjectsV2
returns objects in lexicographical order based on their key names.
Directory bucket - For directory buckets, ListObjectsV2
does not return objects in lexicographical order.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.
The following operations are related to ListObjectsV2
:
Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK
response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.
General purpose bucket - For general purpose buckets, ListObjectsV2
doesn't return prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, ListObjectsV2
response includes the prefixes that are related only to in-progress multipart uploads.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - For general purpose buckets, ListObjectsV2
returns objects in lexicographical order based on their key names.
Directory bucket - For directory buckets, ListObjectsV2
does not return objects in lexicographical order.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.
The following operations are related to ListObjectsV2
:
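A minimal botocore sketch of walking a full ListObjectsV2 listing with the built-in paginator (bucket name and prefix are placeholders):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# ListObjectsV2 returns at most 1,000 keys per call; the paginator follows
# the continuation token until the listing is exhausted.
paginator = s3.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket="amzn-s3-demo-bucket", Prefix="photos/"):
    for obj in page.get("Contents", []):
        print(obj["Key"], obj["Size"])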
This operation is not supported by directory buckets.
This action uses the encryption
subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
This operation is not supported by directory buckets.
This action uses the encryption
subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.
Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
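A hedged botocore sketch of setting SSE-KMS as the default bucket encryption with a fully qualified key ARN, as the note above recommends (bucket name and key ARN are placeholders):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# Use the full key ARN rather than an alias so the key is not resolved in the
# requester's account; S3 does not validate the KMS key ID passed here.
s3.put_bucket_encryption(
    Bucket="amzn-s3-demo-bucket",
    ServerSideEncryptionConfiguration={
        "Rules": [
            {
                "ApplyServerSideEncryptionByDefault": {
                    "SSEAlgorithm": "aws:kms",
                    "KMSMasterKeyID": "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
                },
                "BucketKeyEnabled": True,
            }
        ]
    },
)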
This operation is not supported by directory buckets.
Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request
header and the Status
and the MfaDelete
request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
The following operations are related to PutBucketVersioning
:
This operation is not supported by directory buckets.
When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT
or DELETE
) on objects in the bucket.
Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request
header and the Status
and the MfaDelete
request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
The following operations are related to PutBucketVersioning
:
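A minimal botocore sketch of enabling versioning (the bucket name is a placeholder; the x-amz-mfa header is only needed when changing MFA Delete and is omitted here):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# Switch Status to "Suspended" to stop assigning new version IDs.
s3.put_bucket_versioning(
    Bucket="amzn-s3-demo-bucket",
    VersioningConfiguration={"Status": "Enabled"},
)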
The number of years that you want to specify for the default retention period. Must be used with Mode
.
The container element for specifying the default Object Lock retention settings for new objects placed in the specified bucket.
The DefaultRetention
settings require both a mode and a period.
The DefaultRetention
period can be either Days
or Years
but you must select one. You cannot specify Days
and Years
at the same time.
The container element for optionally specifying the default Object Lock retention settings for new objects placed in the specified bucket.
The DefaultRetention
settings require both a mode and a period.
The DefaultRetention
period can be either Days
or Years
but you must select one. You cannot specify Days
and Years
at the same time.
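A hedged botocore sketch of a default retention rule that uses Years (the bucket name is a placeholder, and the bucket must have been created with Object Lock enabled):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# DefaultRetention needs a mode plus exactly one period unit: Days or Years,
# never both.
s3.put_object_lock_configuration(
    Bucket="amzn-s3-demo-bucket",
    ObjectLockConfiguration={
        "ObjectLockEnabled": "Enabled",
        "Rule": {"DefaultRetention": {"Mode": "GOVERNANCE", "Years": 1}},
    },
)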
Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key can contain any Unicode character; however, the XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.
", + "documentation":"Encoding type used by Amazon S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. For more information about characters to avoid in object key names, see Object key naming guidelines.
When using the URL encoding type, non-ASCII characters that are used in an object's key name will be percent-encoded according to UTF-8 code values. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web Services KMS key stored in Amazon Web Services Key Management Service (KMS) for the destination bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.
" } }, - "documentation":"Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects.
" + "documentation":"Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.
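A hedged botocore sketch of a replication rule that encrypts replicas with a destination KMS key, using a fully qualified key ARN as recommended above (role ARN, bucket names, and key ARN are placeholders):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# SourceSelectionCriteria is needed so that SSE-KMS-encrypted source objects
# are picked up by the rule; ReplicaKmsKeyID sets the key used for replicas.
s3.put_bucket_replication(
    Bucket="amzn-s3-demo-source-bucket",
    ReplicationConfiguration={
        "Role": "arn:aws:iam::111122223333:role/replication-role",
        "Rules": [
            {
                "ID": "ReplicateEncryptedObjects",
                "Status": "Enabled",
                "Priority": 1,
                "Filter": {},
                "DeleteMarkerReplication": {"Status": "Disabled"},
                "SourceSelectionCriteria": {
                    "SseKmsEncryptedObjects": {"Status": "Enabled"}
                },
                "Destination": {
                    "Bucket": "arn:aws:s3:::amzn-s3-demo-destination-bucket",
                    "EncryptionConfiguration": {
                        "ReplicaKmsKeyID": "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
                    },
                },
            }
        ],
    },
)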
The Region where the bucket is located.
This functionality is not supported for directory buckets.
The Region where the bucket is located.
", "location":"header", "locationName":"x-amz-bucket-region" }, "AccessPointAlias":{ "shape":"AccessPointAlias", - "documentation":"Indicates whether the bucket name used in the request is an access point alias.
This functionality is not supported for directory buckets.
Indicates whether the bucket name used in the request is an access point alias.
For directory buckets, the value of this field is false
.
The owner of the buckets listed.
" + }, + "ContinuationToken":{ + "shape":"NextToken", + "documentation":" ContinuationToken
is included in the response when there are more buckets that can be listed with pagination. The next ListBuckets
request to Amazon S3 can be continued with this ContinuationToken
. ContinuationToken
is obfuscated and is not a real bucket.
The maximum number of buckets to return in the response. When this number is greater than the number of buckets owned by the Amazon Web Services account, all of the buckets owned by the account are returned in the response.
", + "location":"querystring", + "locationName":"max-buckets" + }, + "ContinuationToken":{ + "shape":"Token", + "documentation":" ContinuationToken
indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken
is obfuscated and is not a real key. You can use this ContinuationToken
for pagination of the list results.
Length Constraints: Minimum length of 0. Maximum length of 1024.
Required: No.
", + "location":"querystring", + "locationName":"continuation-token" } } }, @@ -6611,7 +6633,7 @@ "members":{ "ContinuationToken":{ "shape":"DirectoryBucketToken", - "documentation":" ContinuationToken
indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken
is obfuscated and is not a real key. You can use this ContinuationToken
for pagination of the list results.
ContinuationToken
indicates to Amazon S3 that the list is being continued on buckets in this account with a token. ContinuationToken
is obfuscated and is not a real bucket name. You can use this ContinuationToken
for the pagination of the list results.
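A minimal botocore sketch of the new ListBuckets pagination added in this release (requires botocore 1.34.162 or later):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# Page through all buckets in the account; the loop ends when the response
# no longer carries a ContinuationToken.
kwargs = {"MaxBuckets": 100}
while True:
    resp = s3.list_buckets(**kwargs)
    for bucket in resp.get("Buckets", []):
        print(bucket["Name"])
    token = resp.get("ContinuationToken")
    if not token:
        break
    kwargs["ContinuationToken"] = token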
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
Encoding type used by Amazon S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. For more information about characters to avoid in object key names, see Object key naming guidelines.
When using the URL encoding type, non-ASCII characters that are used in an object's key name will be percent-encoded according to UTF-8 code values. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
Encoding type used by Amazon S3 to encode the object keys in the response. Responses are encoded only in UTF-8. An object key can contain any Unicode character. However, the XML 1.0 parser can't parse certain characters, such as characters with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response. For more information about characters to avoid in object key names, see Object key naming guidelines.
When using the URL encoding type, non-ASCII characters that are used in an object's key name will be percent-encoded according to UTF-8 code values. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
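For callers that parse the XML listing themselves (the SDKs may decode these keys automatically), the percent-encoded form can be restored with standard URL decoding; a small illustrative sketch:

import urllib.parse

# Key as it appears in a url-encoded listing response.
encoded_key = "test_file%283%29.png"
print(urllib.parse.unquote(encoded_key))  # -> test_file(3).png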
Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime.
" + "documentation":"Specifies the partition date source for the partitioned prefix. PartitionDateSource
can be EventTime
or DeliveryTime
.
For DeliveryTime
, the time in the log file names corresponds to the delivery time for the log files.
For EventTime
, the logs delivered are for a specific day only. The year, month, and day correspond to the day on which the event occurred, and the hour, minutes, and seconds are set to 00 in the key.
Amazon S3 keys for log objects are partitioned in the following format:
[DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
PartitionedPrefix defaults to EventTime delivery when server access logs are delivered.
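A hedged botocore sketch of enabling partitioned, event-time-based log prefixes (bucket names are placeholders; TargetObjectKeyFormat is assumed to be available in the installed service model):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# Switch PartitionDateSource to "DeliveryTime" to partition log keys on the
# delivery time instead of the event time.
s3.put_bucket_logging(
    Bucket="amzn-s3-demo-source-bucket",
    BucketLoggingStatus={
        "LoggingEnabled": {
            "TargetBucket": "amzn-s3-demo-log-bucket",
            "TargetPrefix": "logs/",
            "TargetObjectKeyFormat": {
                "PartitionedPrefix": {"PartitionDateSource": "EventTime"}
            },
        }
    },
)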
", @@ -8176,7 +8204,7 @@ }, "RestrictPublicBuckets":{ "shape":"Setting", - "documentation":"Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE
restricts access to this bucket to only Amazon Web Service principals and authorized users within this account if the bucket has a public policy.
Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
", + "documentation":"Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE
restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has a public policy.
Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
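A minimal botocore sketch of turning the setting on alongside the other public access block flags (the bucket name is a placeholder):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

# RestrictPublicBuckets limits a bucket with a public policy to Amazon Web
# Services service principals and authorized users within the account.
s3.put_public_access_block(
    Bucket="amzn-s3-demo-bucket",
    PublicAccessBlockConfiguration={
        "BlockPublicAcls": True,
        "IgnorePublicAcls": True,
        "BlockPublicPolicy": True,
        "RestrictPublicBuckets": True,
    },
)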
", "locationName":"RestrictPublicBuckets" } }, @@ -9797,7 +9825,7 @@ "members":{ "Payload":{ "shape":"Body", - "documentation":"The byte array of partial, one or more result records.
", + "documentation":"The byte array of partial, one or more result records. S3 Select doesn't guarantee that a record will be self-contained in one record frame. To ensure continuous streaming of data, S3 Select might split the same record across multiple record frames instead of aggregating the results in memory. Some S3 clients (for example, the SDK for Java) handle this behavior by creating a ByteStream
out of the response by default. Other clients might not handle this behavior by default. In those cases, you must aggregate the results on the client side and parse the response.
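A hedged botocore sketch of the client-side aggregation described above (bucket, key, and query are placeholders):

import botocore.session

s3 = botocore.session.get_session().create_client("s3")

resp = s3.select_object_content(
    Bucket="amzn-s3-demo-bucket",
    Key="data.csv",
    ExpressionType="SQL",
    Expression="SELECT * FROM S3Object s",
    InputSerialization={"CSV": {"FileHeaderInfo": "USE"}},
    OutputSerialization={"CSV": {}},
)

# Records events may split a row across frames, so buffer all payload bytes
# and only split into lines after the event stream ends.
buffered = bytearray()
for event in resp["Payload"]:
    if "Records" in event:
        buffered.extend(event["Records"]["Payload"])

for line in bytes(buffered).decode("utf-8").splitlines():
    print(line)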
Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm
is set to aws:kms
or aws:kms:dsse
.
You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
Key Alias: alias/alias-name
If you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.
If you are using encryption with cross-account or Amazon Web Services service operations, you must use a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.
Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption in the Amazon S3 API Reference.
" + "documentation":"Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption in the Amazon S3 API Reference.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.
Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled
element to true
causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.
For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
" } }, - "documentation":"Specifies the default server-side encryption configuration.
" + "documentation":"Specifies the default server-side encryption configuration.
If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner.