From 43ef1bacabc67d81cb1385f3e34ef4930b1af25a Mon Sep 17 00:00:00 2001
From: awssdkgo
Date: Wed, 17 Mar 2021 18:16:53 +0000
Subject: [PATCH] Release v1.37.33 (2021-03-17)

===

### Service Client Updates
* `service/batch`: Updates service API and documentation
  * Making serviceRole an optional parameter when creating a compute environment. If serviceRole is not provided then Service Linked Role will be created (or reused if it already exists).
* `service/sagemaker`: Updates service API and documentation
  * Support new target device ml_eia2 in SageMaker CreateCompilationJob API
---
 CHANGELOG.md                                 |   9 +
 aws/endpoints/defaults.go                    |  58 ++-
 aws/version.go                               |   2 +-
 models/apis/batch/2016-08-10/api-2.json      |   3 +-
 models/apis/batch/2016-08-10/docs-2.json     |  94 ++---
 models/apis/sagemaker/2017-07-24/api-2.json  |   1 +
 models/apis/sagemaker/2017-07-24/docs-2.json |   4 +-
 models/endpoints/endpoints.json              |  46 ++-
 service/batch/api.go                         | 351 ++++++++++---------
 service/batch/doc.go                         |  12 +-
 service/sagemaker/api.go                     |  29 ++
 11 files changed, 373 insertions(+), 236 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 38d6871bdb9..d5269562ddb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,12 @@
+Release v1.37.33 (2021-03-17)
+===
+
+### Service Client Updates
+* `service/batch`: Updates service API and documentation
+  * Making serviceRole an optional parameter when creating a compute environment. If serviceRole is not provided then Service Linked Role will be created (or reused if it already exists).
+* `service/sagemaker`: Updates service API and documentation
+  * Support new target device ml_eia2 in SageMaker CreateCompilationJob API
+
 Release v1.37.32 (2021-03-16)
 ===
 
diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go
index 6f4c32fa052..05cbff6294e 100644
--- a/aws/endpoints/defaults.go
+++ b/aws/endpoints/defaults.go
@@ -1004,9 +1004,11 @@ var awsPartition = partition{
 		"batch": service{
 
 			Endpoints: endpoints{
+				"af-south-1":     endpoint{},
 				"ap-east-1":      endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
+				"ap-northeast-3": endpoint{},
 				"ap-south-1":     endpoint{},
 				"ap-southeast-1": endpoint{},
 				"ap-southeast-2": endpoint{},
@@ -5019,12 +5021,42 @@ var awsPartition = partition{
 				"eu-west-1":  endpoint{},
 				"eu-west-2":  endpoint{},
 				"eu-west-3":  endpoint{},
-				"me-south-1": endpoint{},
-				"sa-east-1":  endpoint{},
-				"us-east-1":  endpoint{},
-				"us-east-2":  endpoint{},
-				"us-west-1":  endpoint{},
-				"us-west-2":  endpoint{},
+				"fips-ca-central-1": endpoint{
+					Hostname: "ram-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"fips-us-east-1": endpoint{
+					Hostname: "ram-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "ram-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "ram-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "ram-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1":  endpoint{},
+				"us-east-1":  endpoint{},
+				"us-east-2":  endpoint{},
+				"us-west-1":  endpoint{},
+				"us-west-2":  endpoint{},
 			},
 		},
 		"rds": service{
@@ -9214,8 +9246,18 @@ var awsusgovPartition = partition{
 		"ram": service{
 
 			Endpoints: endpoints{
-				"us-gov-east-1": endpoint{},
-				"us-gov-west-1": endpoint{},
+				"us-gov-east-1": endpoint{
+					Hostname: "ram.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				"us-gov-west-1": endpoint{
+					Hostname: "ram.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
 			},
 		},
 		"rds": service{
diff --git a/aws/version.go b/aws/version.go
index 2d9312f316f..7bcbec3240b 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.37.32"
+const SDKVersion = "1.37.33"
diff --git a/models/apis/batch/2016-08-10/api-2.json b/models/apis/batch/2016-08-10/api-2.json
index 409cca6ca76..bff1f901462 100644
--- a/models/apis/batch/2016-08-10/api-2.json
+++ b/models/apis/batch/2016-08-10/api-2.json
@@ -566,8 +566,7 @@
       "type":"structure",
       "required":[
         "computeEnvironmentName",
-        "type",
-        "serviceRole"
+        "type"
       ],
       "members":{
         "computeEnvironmentName":{"shape":"String"},
diff --git a/models/apis/batch/2016-08-10/docs-2.json b/models/apis/batch/2016-08-10/docs-2.json
index bf8e0d389e6..5c7d7659b8f 100644
--- a/models/apis/batch/2016-08-10/docs-2.json
+++ b/models/apis/batch/2016-08-10/docs-2.json
@@ -1,21 +1,21 @@
 {
   "version": "2.0",
-  "service": "

Using AWS Batch, you can run batch computing workloads on the AWS Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. AWS Batch utilizes the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure, while also adopting a familiar batch computing software approach. Given these advantages, AWS Batch can help you to efficiently provision resources in response to jobs submitted, thus effectively helping to eliminate capacity constraints, reduce compute costs, and deliver your results more quickly.

As a fully managed service, AWS Batch can run batch computing workloads of any scale. AWS Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With AWS Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

", + "service": "

Using AWS Batch, you can run batch computing workloads on the AWS Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. AWS Batch uses the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it adopts a familiar batch computing software approach. Given these advantages, AWS Batch can help you efficiently provision resources in response to submitted jobs, eliminate capacity constraints, reduce compute costs, and deliver your results more quickly.

As a fully managed service, AWS Batch can run batch computing workloads of any scale. AWS Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With AWS Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

", "operations": { - "CancelJob": "

Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING or RUNNING are not canceled (but the API operation still succeeds, even if no job is canceled); these jobs must be terminated with the TerminateJob operation.

", - "CreateComputeEnvironment": "

Creates an AWS Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or AWS Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. You can choose either to use EC2 On-Demand Instances and EC2 Spot Instances, or to use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs are not supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility with how you configure your compute resources. For example, you can use custom AMI. However, you need to verify that your AMI meets the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you have created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, manually launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

AWS Batch doesn't upgrade the AMIs in a compute environment after it's created. For example, it doesn't update the AMIs when a newer version of the Amazon ECS-optimized AMI is available. Therefore, you're responsible for the management of the guest operating system (including updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your AWS Batch jobs, complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

", - "CreateJobQueue": "

Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.

You also set a priority to the job queue that determines the order in which the AWS Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.

", - "DeleteComputeEnvironment": "

Deletes an AWS Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use AWS Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment will end up in an invalid state.

", + "CancelJob": "

Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING or RUNNING aren't canceled, but the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.

", + "CreateComputeEnvironment": "

Creates an AWS Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or AWS Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. You can choose either to use EC2 On-Demand Instances and EC2 Spot Instances, or to use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs aren't supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meets the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you create your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

AWS Batch doesn't upgrade the AMIs in a compute environment after the environment is created. For example, it doesn't update the AMIs when a newer version of the Amazon ECS optimized AMI is available. Therefore, you're responsible for managing the guest operating system (including its updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your AWS Batch jobs, complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

", + "CreateJobQueue": "

Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.

You also set a priority to the job queue that determines the order in which the AWS Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.

", + "DeleteComputeEnvironment": "

Deletes an AWS Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use AWS Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment enters an invalid state.

", "DeleteJobQueue": "

Deletes the specified job queue. You must first disable submissions for a queue with the UpdateJobQueue operation. All jobs in the queue are eventually terminated when you delete a job queue. The jobs are terminated at a rate of about 16 jobs each second.

It's not necessary to disassociate compute environments from a queue before submitting a DeleteJobQueue request.

", "DeregisterJobDefinition": "

Deregisters an AWS Batch job definition. Job definitions are permanently deleted after 180 days.

", "DescribeComputeEnvironments": "

Describes one or more of your compute environments.

If you're using an unmanaged compute environment, you can use the DescribeComputeEnvironments operation to determine the ecsClusterArn that you should launch your Amazon ECS container instances into.

", "DescribeJobDefinitions": "

Describes a list of job definitions. You can specify a status (such as ACTIVE) to only return job definitions that match that status.

", "DescribeJobQueues": "

Describes one or more of your job queues.

", "DescribeJobs": "

Describes a list of AWS Batch jobs.

", - "ListJobs": "

Returns a list of AWS Batch jobs.

You must specify only one of the following items:

You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned.

", + "ListJobs": "

Returns a list of AWS Batch jobs.

You must specify only one of the following items:

You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned.

", "ListTagsForResource": "

Lists the tags for an AWS Batch resource. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "RegisterJobDefinition": "

Registers an AWS Batch job definition.

", - "SubmitJob": "

Submits an AWS Batch job from a job definition. Parameters specified during SubmitJob override parameters defined in the job definition.

Jobs run on Fargate resources don't run for more than 14 days. After 14 days, the Fargate resources might no longer be available and the job is terminated.

", + "SubmitJob": "

Submits an AWS Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory requirements that are specified in the ResourceRequirements objects in the job definition are the exception. They can't be overridden this way using the memory and vcpus parameters. Rather, you must specify updates to job definition parameters in a ResourceRequirements object that's included in the containerOverrides parameter.

Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 days, Fargate resources might become unavailable and the job might be terminated.

", "TagResource": "

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags associated with that resource are deleted as well. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "TerminateJob": "

Terminates a job in a job queue. Jobs that are in the STARTING or RUNNING state are terminated, which causes them to transition to FAILED. Jobs that haven't progressed to the STARTING state are canceled.

", "UntagResource": "

Deletes specified tags from an AWS Batch resource.

", @@ -85,7 +85,7 @@ "ContainerProperties$readonlyRootFilesystem": "

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

", "ContainerProperties$privileged": "

When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run. The default value is false.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided, or specified as false.

", "JobDefinition$propagateTags": "

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state.

", - "JobDetail$propagateTags": "

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags are not propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state.

", + "JobDetail$propagateTags": "

Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks during task creation. For tags with the same name, job tags are given priority over job definitions tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the FAILED state.

", "LinuxParameters$initProcessEnabled": "

If true, run an init process inside the container that forwards signals and reaps processes. This parameter maps to the --init option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

", "MountPoint$readOnly": "

If this value is true, the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is false.

", "NodeDetails$isMainNode": "

Specifies whether the current node is the main node for a multi-node parallel job.

", @@ -118,7 +118,7 @@ "CRAllocationStrategy": { "base": null, "refs": { - "ComputeResource$allocationStrategy": "

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation Strategies in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

BEST_FIT (default)

AWS Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, AWS Batch will wait for the additional instances to be available. If there are not enough instances available, or if the user is hitting Amazon EC2 service limits then additional jobs aren't run until currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with BEST_FIT then the Spot Fleet IAM Role must be specified.

BEST_FIT_PROGRESSIVE

AWS Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, AWS Batch will select new instance types.

SPOT_CAPACITY_OPTIMIZED

AWS Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance.

" + "ComputeResource$allocationStrategy": "

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation Strategies in the AWS Batch User Guide.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

BEST_FIT (default)

AWS Batch selects an instance type that best fits the needs of the jobs, with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, AWS Batch waits for the additional instances to be available. If there aren't enough instances available, or if the user is hitting Amazon EC2 service limits, then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, then the Spot Fleet IAM Role must be specified.

BEST_FIT_PROGRESSIVE

AWS Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. If additional instances of the previously selected instance types aren't available, AWS Batch will select new instance types.

SPOT_CAPACITY_OPTIMIZED

AWS Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance.

" } }, "CRType": { @@ -165,7 +165,7 @@ "refs": { "CreateJobQueueRequest$computeEnvironmentOrder": "

The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler uses this parameter to determine which compute environment should run a specific job. Compute environments must be in the VALID state before you can associate them with a job queue. You can associate up to three compute environments with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.

", "JobQueueDetail$computeEnvironmentOrder": "

The compute environments that are attached to the job queue and the order that job placement is preferred. Compute environments are selected for job placement in ascending order.

", - "UpdateJobQueueRequest$computeEnvironmentOrder": "

Details the set of compute environments mapped to a job queue and their order relative to each other. This is one of the parameters used by the job scheduler to determine which compute environment should run a given job. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.

" + "UpdateJobQueueRequest$computeEnvironmentOrder": "

Details the set of compute environments mapped to a job queue and their order relative to each other. This is one of the parameters used by the job scheduler to determine which compute environment should run a given job. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. AWS Batch doesn't support mixing compute environment architecture types in a single job queue.

" } }, "ComputeResource": { @@ -191,7 +191,7 @@ "base": "

The overrides that should be sent to a container.

", "refs": { "NodePropertyOverride$containerOverrides": "

The overrides that should be sent to a node range.

", - "SubmitJobRequest$containerOverrides": "

A list of container overrides in JSON format that specify the name of a container in the specified job definition and the overrides it should receive. You can override the default command for a container (that's specified in the job definition or the Docker image) with a command override. You can also override existing environment variables (that are specified in the job definition or Docker image) on a container or add new environment variables to it with an environment override.

" + "SubmitJobRequest$containerOverrides": "

A list of container overrides in the JSON format that specify the name of a container in the specified job definition and the overrides it should receive. You can override the default command for a container, which is specified in the job definition or the Docker image, with a command override. You can also override existing environment variables on a container or add new environment variables to it with an environment override.

" } }, "ContainerProperties": { @@ -355,7 +355,7 @@ } }, "FargatePlatformConfiguration": { - "base": "

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

", + "base": "

The platform configuration for jobs running on Fargate resources. For jobs that run on EC2 resources, you shouldn't specify this parameter.

", "refs": { "ContainerDetail$fargatePlatformConfiguration": "

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

", "ContainerProperties$fargatePlatformConfiguration": "

The platform configuration for jobs running on Fargate resources. Jobs running on EC2 resources must not specify this parameter.

" @@ -391,31 +391,31 @@ "AttemptContainerDetail$exitCode": "

The exit code for the job attempt. A non-zero exit code is considered a failure.

", "ComputeEnvironmentOrder$order": "

The order of the compute environment. Compute environments are tried in ascending order. For example, if two compute environments are associated with a job queue, the compute environment with a lower order integer value is tried for job placement first.

", "ComputeResource$minvCpus": "

The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is DISABLED).

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

", - "ComputeResource$maxvCpus": "

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch will never go above maxvCpus by more than a single instance (e.g., no more than a single instance from among those specified in your compute environment).

", + "ComputeResource$maxvCpus": "

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

", "ComputeResource$desiredvCpus": "

The desired number of Amazon EC2 vCPUS in the compute environment. AWS Batch modifies this value between the minimum and maximum values, based on job queue demand.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

", "ComputeResource$bidPercentage": "

The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand price.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

", "ComputeResourceUpdate$minvCpus": "

The minimum number of Amazon EC2 vCPUs that an environment should maintain.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

", - "ComputeResourceUpdate$maxvCpus": "

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to go above maxvCpus to meet your capacity requirements. In this event, AWS Batch will never go above maxvCpus by more than a single instance (e.g., no more than a single instance from among those specified in your compute environment).

", + "ComputeResourceUpdate$maxvCpus": "

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, AWS Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, AWS Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.

", "ComputeResourceUpdate$desiredvCpus": "

The desired number of Amazon EC2 vCPUS in the compute environment.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

", - "ContainerDetail$vcpus": "

The number of vCPUs reserved for the container. Jobs running on EC2 resources can specify the vCPU requirement for the job using resourceRequirements but the vCPU requirements can't be specified both here and in the resourceRequirement object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs running on Fargate resources. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

", + "ContainerDetail$vcpus": "

The number of vCPUs reserved for the container. For jobs that run on EC2 resources, you can specify the vCPU requirement for the job using resourceRequirements, but you can't specify the vCPU requirements in both the vcpus and resourceRequirement object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs that run on Fargate resources. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", "ContainerDetail$memory": "

For jobs run on EC2 resources that didn't specify memory requirements using ResourceRequirement, the number of MiB of memory reserved for the job. For other jobs, including all run on Fargate resources, see resourceRequirements.

", "ContainerDetail$exitCode": "

The exit code to return upon completion.

", - "ContainerOverrides$vcpus": "

This parameter is deprecated and not supported for jobs run on Fargate resources, see resourceRequirement. For jobs run on EC2 resources, the number of vCPUs to reserve for the container. This value overrides the value set in the job definition. Jobs run on EC2 resources can specify the vCPU requirement using resourceRequirement but the vCPU requirements can't be specified both here and in resourceRequirement. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

", - "ContainerOverrides$memory": "

This parameter is deprecated and not supported for jobs run on Fargate resources, use ResourceRequirement. For jobs run on EC2 resource, the number of MiB of memory reserved for the job. This value overrides the value set in the job definition.

", - "ContainerProperties$vcpus": "

This parameter is deprecated and not supported for jobs run on Fargate resources, see resourceRequirement. The number of vCPUs reserved for the container. Jobs running on EC2 resources can specify the vCPU requirement for the job using resourceRequirements but the vCPU requirements can't be specified both here and in the resourceRequirement structure. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. Jobs running on Fargate resources must specify the vCPU requirement for the job using resourceRequirements.

", - "ContainerProperties$memory": "

This parameter is deprecated and not supported for jobs run on Fargate resources, use ResourceRequirement. For jobs run on EC2 resources can specify the memory requirement using the ResourceRequirement structure. The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places; it must be specified for each node at least once.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

", + "ContainerOverrides$vcpus": "

This parameter indicates the number of vCPUs reserved for the container. It overrides the vcpus parameter that's set in the job definition, but doesn't override any vCPU requirement specified in the resourceRequirement structure in the job definition.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For Fargate resources, you can only use resourceRequirement. For EC2 resources, you can use either this parameter or resourceRequirement, but not both.

This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.

This parameter isn't applicable to jobs that run on Fargate resources and shouldn't be provided. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", + "ContainerOverrides$memory": "

This parameter indicates the amount of memory (in MiB) that's reserved for the job. It overrides the memory parameter set in the job definition, but doesn't override any memory requirement specified in the ResourceRequirement structure in the job definition.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead.

", + "ContainerProperties$vcpus": "

The number of vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. The number of vCPUs must be specified, but it can be specified in several places. You must specify it at least once for each node.

This parameter is supported on EC2 resources but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead. You can use either this parameter or the resourceRequirements structure, but not both.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", + "ContainerProperties$memory": "

This parameter indicates the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it is terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.

This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

This parameter is supported on EC2 resources but isn't supported on Fargate resources. For Fargate resources, you must specify the memory requirement using resourceRequirement; you can also do so for EC2 resources.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

", "ContainerSummary$exitCode": "

The exit code to return upon completion.

", "CreateJobQueueRequest$priority": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments cannot be mixed.

", "DescribeComputeEnvironmentsRequest$maxResults": "

The maximum number of cluster results returned by DescribeComputeEnvironments in paginated output. When this parameter is used, DescribeComputeEnvironments only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeComputeEnvironments request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeComputeEnvironments returns up to 100 results and a nextToken value if applicable.

", - "DescribeJobDefinitionsRequest$maxResults": "

The maximum number of results returned by DescribeJobDefinitions in paginated output. When this parameter is used, DescribeJobDefinitions only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobDefinitions returns up to 100 results and a nextToken value if applicable.

", - "DescribeJobQueuesRequest$maxResults": "

The maximum number of results returned by DescribeJobQueues in paginated output. When this parameter is used, DescribeJobQueues only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobQueues request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobQueues returns up to 100 results and a nextToken value if applicable.

", + "DescribeJobDefinitionsRequest$maxResults": "

The maximum number of results returned by DescribeJobDefinitions in paginated output. When this parameter is used, DescribeJobDefinitions only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobDefinitions returns up to 100 results and a nextToken value if applicable.

", + "DescribeJobQueuesRequest$maxResults": "

The maximum number of results returned by DescribeJobQueues in paginated output. When this parameter is used, DescribeJobQueues only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobQueues request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobQueues returns up to 100 results and a nextToken value if applicable.

", "JobDefinition$revision": "

The revision of the job definition.

", - "JobQueueDetail$priority": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments cannot be mixed.

", + "JobQueueDetail$priority": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

", "JobTimeout$attemptDurationSeconds": "

The time duration in seconds (measured from the job attempt's startedAt timestamp) after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.

", "LinuxParameters$sharedMemorySize": "

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

", "LinuxParameters$maxSwap": "

The total amount of swap memory (in MiB) a container can use. This parameter is translated to the --memory-swap option to docker run where the value is the sum of the container memory plus the maxSwap value. For more information, see --memory-swap details in the Docker documentation.

If a maxSwap value of 0 is specified, the container doesn't use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container doesn't use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

", - "LinuxParameters$swappiness": "

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping not to happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter isn't specified, a default value of 60 is used. If a value isn't specified for maxSwap then this parameter is ignored. If maxSwap is set to 0, the container doesn't use swap. This parameter maps to the --memory-swappiness option to docker run.

Consider the following when you use a per-container swap configuration.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

", - "ListJobsRequest$maxResults": "

The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results and a nextToken value if applicable.

", + "LinuxParameters$swappiness": "

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping not to happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter isn't specified, a default value of 60 is used. If a value isn't specified for maxSwap then this parameter is ignored. If maxSwap is set to 0, the container doesn't use swap. This parameter maps to the --memory-swappiness option to docker run.

Consider the following when you use a per-container swap configuration.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

", + "ListJobsRequest$maxResults": "

The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results and a nextToken value if applicable.

", "NodeDetails$nodeIndex": "

The node index for the node. Node index numbering begins at zero. This index is also available on the node with the AWS_BATCH_JOB_NODE_INDEX environment variable.

", "NodeOverrides$numNodes": "

The number of nodes to use with a multi-node parallel job. This value overrides the number of nodes that are specified in the job definition. To use this override:

", "NodeProperties$numNodes": "

The number of nodes associated with a multi-node parallel job.

", @@ -427,7 +427,7 @@ "Tmpfs$size": "

The size (in MiB) of the tmpfs volume.

", "Ulimit$hardLimit": "

The hard limit for the ulimit type.

", "Ulimit$softLimit": "

The soft limit for the ulimit type.

", - "UpdateJobQueueRequest$priority": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments cannot be mixed.

" + "UpdateJobQueueRequest$priority": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be mixed.

" } }, "JQState": { @@ -435,7 +435,7 @@ "refs": { "CreateJobQueueRequest$state": "

The state of the job queue. If the job queue state is ENABLED, it is able to accept jobs. If the job queue state is DISABLED, new jobs can't be added to the queue, but jobs already in the queue can finish.

", "JobQueueDetail$state": "

Describes the ability of the queue to accept new jobs. If the job queue state is ENABLED, it's able to accept jobs. If the job queue state is DISABLED, new jobs can't be added to the queue, but jobs already in the queue can finish.

", - "UpdateJobQueueRequest$state": "

Describes the queue's ability to accept new jobs. If the job queue state is ENABLED, it is able to accept jobs. If the job queue state is DISABLED, new jobs cannot be added to the queue, but jobs already in the queue can finish.

" + "UpdateJobQueueRequest$state": "

Describes the queue's ability to accept new jobs. If the job queue state is ENABLED, it can accept jobs. If the job queue state is DISABLED, new jobs can't be added to the queue, but jobs already in the queue can finish.

" } }, "JQStatus": { @@ -570,7 +570,7 @@ "LogConfiguration": { "base": "

Log configuration options to send to a custom log driver for the container.

", "refs": { - "ContainerDetail$logConfiguration": "

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Or, alternatively, it must be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers might be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

", + "ContainerDetail$logConfiguration": "

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Or, alternatively, it must be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers might be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

", "ContainerProperties$logConfiguration": "

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" } }, @@ -583,7 +583,7 @@ "LogDriver": { "base": null, "refs": { - "LogConfiguration$logDriver": "

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

The supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

Jobs running on Fargate resources are restricted to the awslogs and splunk log drivers.

awslogs

Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs Log Driver in the AWS Batch User Guide and Amazon CloudWatch Logs logging driver in the Docker documentation.

fluentd

Specifies the Fluentd logging driver. For more information, including usage and options, see Fluentd logging driver in the Docker documentation.

gelf

Specifies the Graylog Extended Format (GELF) logging driver. For more information, including usage and options, see Graylog Extended Format logging driver in the Docker documentation.

journald

Specifies the journald logging driver. For more information, including usage and options, see Journald logging driver in the Docker documentation.

json-file

Specifies the JSON file logging driver. For more information, including usage and options, see JSON File logging driver in the Docker documentation.

splunk

Specifies the Splunk logging driver. For more information, including usage and options, see Splunk logging driver in the Docker documentation.

syslog

Specifies the syslog logging driver. For more information, including usage and options, see Syslog logging driver in the Docker documentation.

If you have a custom driver that's not listed earlier that you want to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you want to have included. However, Amazon Web Services doesn't currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"

" + "LogConfiguration$logDriver": "

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

The supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

Jobs running on Fargate resources are restricted to the awslogs and splunk log drivers.

awslogs

Specifies the Amazon CloudWatch Logs logging driver. For more information, see Using the awslogs Log Driver in the AWS Batch User Guide and Amazon CloudWatch Logs logging driver in the Docker documentation.

fluentd

Specifies the Fluentd logging driver. For more information, including usage and options, see Fluentd logging driver in the Docker documentation.

gelf

Specifies the Graylog Extended Format (GELF) logging driver. For more information, including usage and options, see Graylog Extended Format logging driver in the Docker documentation.

journald

Specifies the journald logging driver. For more information, including usage and options, see Journald logging driver in the Docker documentation.

json-file

Specifies the JSON file logging driver. For more information, including usage and options, see JSON File logging driver in the Docker documentation.

splunk

Specifies the Splunk logging driver. For more information, including usage and options, see Splunk logging driver in the Docker documentation.

syslog

Specifies the syslog logging driver. For more information, including usage and options, see Syslog logging driver in the Docker documentation.

If you have a custom driver that's not listed earlier that you want to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that's available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you want to have included. However, Amazon Web Services doesn't currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log into your container instance and run the following command: sudo docker version | grep \"Server API version\"
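As a quick illustration of the shape this configuration takes in the Go SDK, here is a minimal, hypothetical sketch of a job definition whose container logs to CloudWatch Logs through the awslogs driver. The job definition name, image, and log group are placeholder assumptions, not values prescribed by this release.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/batch"
)

func main() {
	svc := batch.New(session.Must(session.NewSession()))

	// Register a job definition that sends container logs to CloudWatch Logs
	// via the awslogs driver; the log group name is an example value.
	out, err := svc.RegisterJobDefinition(&batch.RegisterJobDefinitionInput{
		JobDefinitionName: aws.String("example-logging-jobdef"),
		Type:              aws.String("container"),
		ContainerProperties: &batch.ContainerProperties{
			Image:   aws.String("public.ecr.aws/amazonlinux/amazonlinux:latest"),
			Command: aws.StringSlice([]string{"echo", "hello"}),
			ResourceRequirements: []*batch.ResourceRequirement{
				{Type: aws.String("VCPU"), Value: aws.String("1")},
				{Type: aws.String("MEMORY"), Value: aws.String("512")},
			},
			LogConfiguration: &batch.LogConfiguration{
				LogDriver: aws.String("awslogs"),
				Options: map[string]*string{
					"awslogs-group": aws.String("/batch/example"),
				},
			},
		},
	})
	if err != nil {
		fmt.Println("RegisterJobDefinition failed:", err)
		return
	}
	fmt.Println("registered:", aws.StringValue(out.JobDefinitionArn))
}
```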

" } }, "Long": { @@ -738,7 +738,7 @@ "RetryAction": { "base": null, "refs": { - "EvaluateOnExit$action": "

Specifies the action to take if all of the specified conditions (onStatusReason, onReason, and onExitCode) are met. The values are not case sensitive.

" + "EvaluateOnExit$action": "

Specifies the action to take if all of the specified conditions (onStatusReason, onReason, and onExitCode) are met. The values aren't case sensitive.

" } }, "RetryStrategy": { @@ -804,12 +804,12 @@ "ContainerOverrides$instanceType": "

The instance type to use for a multi-node parallel job.

This parameter isn't applicable to single-node container jobs or for jobs running on Fargate resources and shouldn't be provided.

", "ContainerProperties$image": "

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

Docker image architecture must match the processor architecture of the compute resources that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based compute resources.

", "ContainerProperties$jobRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

", - "ContainerProperties$executionRoleArn": "

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. Jobs running on Fargate resources must provide an execution role. For more information, see AWS Batch execution IAM role in the AWS Batch User Guide.

", + "ContainerProperties$executionRoleArn": "

The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see AWS Batch execution IAM role in the AWS Batch User Guide.

", "ContainerProperties$user": "

The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

", - "ContainerProperties$instanceType": "

The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.

This parameter isn't applicable to single-node container jobs or for jobs running on Fargate resources and shouldn't be provided.

", + "ContainerProperties$instanceType": "

The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.

This parameter isn't applicable to single-node container jobs or for jobs that run on Fargate resources and shouldn't be provided.

", "ContainerSummary$reason": "

A short (255 max characters) human-readable string to provide additional details about a running or stopped container.

", "CreateComputeEnvironmentRequest$computeEnvironmentName": "

The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", - "CreateComputeEnvironmentRequest$serviceRole": "

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

", + "CreateComputeEnvironmentRequest$serviceRole": "

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your account has already created the AWS Batch service-linked role, that role is used by default for your compute environment unless you specify a role here. If the AWS Batch service-linked role doesn't exist in your account and no role is specified here, the service tries to create the AWS Batch service-linked role in your account.

If your specified role has a path other than /, then you must either specify the full role ARN (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/, then you would specify /foo/bar as the role name. For more information, see Friendly names and paths in the IAM User Guide.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.
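A minimal sketch of what the new optional behavior looks like from the Go SDK: ServiceRole is simply omitted from the request, so AWS Batch falls back to the service-linked role, creating it if needed. The environment name, subnet ID, and security group ID below are hypothetical placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/batch"
)

func main() {
	svc := batch.New(session.Must(session.NewSession()))

	// No ServiceRole field: as of this release, AWS Batch uses (or creates)
	// the service-linked role when the parameter is left unset.
	out, err := svc.CreateComputeEnvironment(&batch.CreateComputeEnvironmentInput{
		ComputeEnvironmentName: aws.String("example-fargate-ce"),
		Type:                   aws.String("MANAGED"),
		ComputeResources: &batch.ComputeResource{
			Type:             aws.String("FARGATE"),
			MaxvCpus:         aws.Int64(16),
			Subnets:          aws.StringSlice([]string{"subnet-0123456789abcdef0"}),
			SecurityGroupIds: aws.StringSlice([]string{"sg-0123456789abcdef0"}),
		},
	})
	if err != nil {
		fmt.Println("CreateComputeEnvironment failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.ComputeEnvironmentArn))
}
```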

", "CreateComputeEnvironmentResponse$computeEnvironmentName": "

The name of the compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", "CreateComputeEnvironmentResponse$computeEnvironmentArn": "

The Amazon Resource Name (ARN) of the compute environment.

", "CreateJobQueueRequest$jobQueueName": "

The name of the job queue. Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed.

", @@ -827,12 +827,12 @@ "DescribeJobQueuesRequest$nextToken": "

The nextToken value returned from a previous paginated DescribeJobQueues request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

This token should be treated as an opaque identifier that's only used to retrieve the next items in a list and not for other programmatic purposes.

", "DescribeJobQueuesResponse$nextToken": "

The nextToken value to include in a future DescribeJobQueues request. When the results of a DescribeJobQueues request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", "Device$hostPath": "

The path for the device on the host container instance.

", - "Device$containerPath": "

The path inside the container used to expose the host device. By default the hostPath value is used.

", - "EvaluateOnExit$onStatusReason": "

Contains a glob pattern to match against the StatusReason returned for a job. The pattern can be up to 512 characters long, can contain letters, numbers, periods (.), colons (:), and white space (spaces, tabs), and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

", - "EvaluateOnExit$onReason": "

Contains a glob pattern to match against the Reason returned for a job. The pattern can be up to 512 characters long, can contain letters, numbers, periods (.), colons (:), and white space (spaces, tabs), and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

", - "EvaluateOnExit$onExitCode": "

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The pattern can be up to 512 characters long, can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

", - "FargatePlatformConfiguration$platformVersion": "

The AWS Fargate platform version on which the jobs are running. A platform version is specified only for jobs running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This will use a recent, approved version of the AWS Fargate platform for compute resources. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

", - "Host$sourcePath": "

The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon assigns a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

This parameter isn't applicable to jobs running on Fargate resources and shouldn't be provided.

", + "Device$containerPath": "

The path inside the container used to expose the host device. By default, the hostPath value is used.

", + "EvaluateOnExit$onStatusReason": "

Contains a glob pattern to match against the StatusReason returned for a job. The pattern can be up to 512 characters long, and can contain letters, numbers, periods (.), colons (:), and white space (including spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

", + "EvaluateOnExit$onReason": "

Contains a glob pattern to match against the Reason returned for a job. The pattern can be up to 512 characters long, and can contain letters, numbers, periods (.), colons (:), and white space (including spaces and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

", + "EvaluateOnExit$onExitCode": "

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The pattern can be up to 512 characters long, can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.
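To make the glob semantics concrete, here is a small, hypothetical sketch of a retry strategy built from these fields in the Go SDK; the status reason pattern is an assumed example, not a value mandated by the API.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/batch"
)

// exampleRetryStrategy retries likely infrastructure failures and exits on
// everything else. Conditions are evaluated in order.
func exampleRetryStrategy() *batch.RetryStrategy {
	return &batch.RetryStrategy{
		Attempts: aws.Int64(3),
		EvaluateOnExit: []*batch.EvaluateOnExit{
			{
				// The glob only needs to match the start of StatusReason,
				// so "Host EC2*" catches, for example, host terminations.
				Action:         aws.String("RETRY"),
				OnStatusReason: aws.String("Host EC2*"),
			},
			{
				// Fall through: exit for any other exit code.
				Action:     aws.String("EXIT"),
				OnExitCode: aws.String("*"),
			},
		},
	}
}
```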

", + "FargatePlatformConfiguration$platformVersion": "

The AWS Fargate platform version where the jobs are running. A platform version is specified only for jobs running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This uses a recent, approved version of the AWS Fargate platform for compute resources. For more information, see AWS Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

", + "Host$sourcePath": "

The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon assigns a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

This parameter isn't applicable to jobs that run on Fargate resources and shouldn't be provided.

", "JobDefinition$jobDefinitionName": "

The name of the job definition.

", "JobDefinition$jobDefinitionArn": "

The Amazon Resource Name (ARN) for the job definition.

", "JobDefinition$status": "

The status of the job definition.

", @@ -876,13 +876,13 @@ "RegisterJobDefinitionRequest$jobDefinitionName": "

The name of the job definition to register. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", "RegisterJobDefinitionResponse$jobDefinitionName": "

The name of the job definition.

", "RegisterJobDefinitionResponse$jobDefinitionArn": "

The Amazon Resource Name (ARN) of the job definition.

", - "ResourceRequirement$value": "

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs running on Fargate resources.

type=\"MEMORY\"

For jobs running on EC2 resources, the hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

For jobs running on Fargate resources, the value is the hard limit (in MiB) and must match one of the supported values. The VCPU value must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5 or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

VCPU = 2 or 4

value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs running on Fargate resources, the value must match one of the supported values, and the MEMORY value must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4.

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

", + "ResourceRequirement$value": "

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs running on Fargate resources.

type=\"MEMORY\"

The memory hard limit (in MiB) presented to the container. This parameter is supported for jobs running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the AWS Batch User Guide.

For jobs running on Fargate resources, the value is the hard limit (in MiB) and must match one of the supported values. The VCPU value must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5 or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

VCPU = 2 or 4

value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs running on Fargate resources, the value must match one of the supported values, and the MEMORY value must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4.

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720
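A brief sketch of one valid pairing from the table above, expressed as ResourceRequirement values in the Go SDK; 0.5 vCPU with 2048 MiB is one of the supported Fargate combinations.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/batch"
)

// fargateResources pairs a supported Fargate VCPU value with a MEMORY value
// that's valid for it (0.5 vCPU allows 1024, 2048, 3072, or 4096 MiB).
func fargateResources() []*batch.ResourceRequirement {
	return []*batch.ResourceRequirement{
		{Type: aws.String("VCPU"), Value: aws.String("0.5")},
		{Type: aws.String("MEMORY"), Value: aws.String("2048")},
	}
}
```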

", "Secret$name": "

The name of the secret.

", - "Secret$valueFrom": "

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the job you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

", + "Secret$valueFrom": "

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the job you're launching, then you can use either the full ARN or the name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.
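A small, hypothetical sketch of both supported forms in the Go SDK; the ARNs, account ID, and secret names are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/batch"
)

// containerSecrets exposes a Secrets Manager secret and a Systems Manager
// Parameter Store parameter to the container as environment variables.
func containerSecrets() []*batch.Secret {
	return []*batch.Secret{
		{
			Name:      aws.String("DB_PASSWORD"),
			ValueFrom: aws.String("arn:aws:secretsmanager:us-east-1:123456789012:secret:prod/db-AbCdEf"),
		},
		{
			Name:      aws.String("API_ENDPOINT"),
			ValueFrom: aws.String("arn:aws:ssm:us-east-1:123456789012:parameter/prod/api-endpoint"),
		},
	}
}
```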

", "ServerException$message": null, "StringList$member": null, "SubmitJobRequest$jobName": "

The name of the job. The first character must be alphanumeric, and up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", - "SubmitJobRequest$jobQueue": "

The job queue into which the job is submitted. You can specify either the name or the Amazon Resource Name (ARN) of the queue.

", + "SubmitJobRequest$jobQueue": "

The job queue where the job is submitted. You can specify either the name or the Amazon Resource Name (ARN) of the queue.

", "SubmitJobRequest$jobDefinition": "

The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision, then the latest active revision is used.

", "SubmitJobResponse$jobArn": "

The Amazon Resource Name (ARN) for the job.

", "SubmitJobResponse$jobName": "

The name of the job.

", @@ -896,7 +896,7 @@ "Ulimit$name": "

The type of the ulimit.

", "UntagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource from which to delete tags. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "UpdateComputeEnvironmentRequest$computeEnvironment": "

The name or full Amazon Resource Name (ARN) of the compute environment to update.

", - "UpdateComputeEnvironmentRequest$serviceRole": "

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

", + "UpdateComputeEnvironmentRequest$serviceRole": "

The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see AWS Batch service IAM role in the AWS Batch User Guide.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your AWS Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

", "UpdateComputeEnvironmentResponse$computeEnvironmentName": "

The name of the compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", "UpdateComputeEnvironmentResponse$computeEnvironmentArn": "

The Amazon Resource Name (ARN) of the compute environment.

", "UpdateJobQueueRequest$jobQueue": "

The name or the Amazon Resource Name (ARN) of the job queue.

", @@ -908,10 +908,10 @@ "StringList": { "base": null, "refs": { - "ComputeResource$instanceTypes": "

The instance types that can be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C4, M4, and R4 instance families) on the fly that match the demand of your job queues.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

Currently, optimal uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5, and R5 instance families are used.

", - "ComputeResource$subnets": "

The VPC subnets into which the compute resources are launched. These subnets must be within the same VPC. This parameter is required for jobs running on Fargate resources, where it can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

", - "ComputeResource$securityGroupIds": "

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs running on Fargate resources and must contain at least one security group. (Fargate does not support launch templates.) If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds will be used.

", - "ComputeResourceUpdate$subnets": "

The VPC subnets that the compute resources are launched into. This parameter is required for jobs running on Fargate compute resources, where it can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide. This can't be specified for EC2 compute resources. Providing an empty list will be handled as if this parameter wasn't specified and no change is made.

", + "ComputeResource$instanceTypes": "

The instance types that can be launched. You can specify instance families to launch any instance type within those families (for example, c5 or p3), or you can specify specific sizes within a family (such as c5.8xlarge). You can also choose optimal to select instance types (from the C4, M4, and R4 instance families) that match the demand of your job queues.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment.

Currently, optimal uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5, and R5 instance families are used.

", + "ComputeResource$subnets": "

The VPC subnets into which the compute resources are launched. These subnets must be within the same VPC. Fargate compute resources can contain up to 16 subnets. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

", + "ComputeResource$securityGroupIds": "

The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security groups must be specified, either in securityGroupIds or using a launch template referenced in launchTemplate. This parameter is required for jobs running on Fargate resources and must contain at least one security group. Fargate doesn't support launch templates. If security groups are specified using both securityGroupIds and launchTemplate, the values in securityGroupIds are used.

", + "ComputeResourceUpdate$subnets": "

The VPC subnets that the compute resources are launched into. Fargate compute resources can contain up to 16 subnets. Providing an empty list is handled as if this parameter wasn't specified and no change is made. This can't be specified for EC2 compute resources. For more information, see VPCs and Subnets in the Amazon VPC User Guide.

", "ComputeResourceUpdate$securityGroupIds": "

The Amazon EC2 security groups associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. This can't be specified for EC2 compute resources. Providing an empty list is handled as if this parameter wasn't specified and no change is made.

", "ContainerDetail$command": "

The command that's passed to the container.

", "ContainerOverrides$command": "

The command to send to the container that overrides the default command from the Docker image or the job definition.

", @@ -980,7 +980,7 @@ "TagsMap": { "base": null, "refs": { - "ComputeResource$tags": "

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value, for example, { \"Name\": \"AWS Batch Instance - C4OnDemand\" }. This is helpful for recognizing your AWS Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment has been created; any changes require creating a new compute environment and removing the old compute environment. These tags are not seen when using the AWS Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" + "ComputeResource$tags": "

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value, for example, { \"Name\": \"AWS Batch Instance - C4OnDemand\" }. This is helpful for recognizing your AWS Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment has been created; any changes require creating a new compute environment and removing the old compute environment. These tags aren't seen when using the AWS Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs running on Fargate resources, and shouldn't be specified.

" } }, "TerminateJobRequest": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 734e339e19f..5ed329b3d98 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -13284,6 +13284,7 @@ "ml_p3", "ml_g4dn", "ml_inf1", + "ml_eia2", "jetson_tx1", "jetson_tx2", "jetson_nano", diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 57d35d6d552..9c7191546c6 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -1394,7 +1394,7 @@ "CompilerOptions": { "base": null, "refs": { - "OutputConfig$CompilerOptions": "

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. CompilerOptions is required for NVIDIA accelerators and highly recommended for CPU compilations. In all other cases, it's optional.

" + "OutputConfig$CompilerOptions": "

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. CompilerOptions is required for NVIDIA accelerators and highly recommended for CPU compilations. In all other cases, it's optional.

" } }, "CompressionType": { @@ -2130,7 +2130,7 @@ "DataInputConfig": { "base": null, "refs": { - "InputConfig$DataInputConfig": "

Specifies the name and shape of the expected data inputs for your trained model in JSON dictionary form. The data inputs are InputConfig$Framework specific.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. The CoreML converter supports TensorFlow and PyTorch models. CoreML conversion examples:

" + "InputConfig$DataInputConfig": "

Specifies the name and shape of the expected data inputs for your trained model in JSON dictionary form. The data inputs are InputConfig$Framework specific.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. The CoreML converter supports TensorFlow and PyTorch models. CoreML conversion examples:

Depending on the model format, DataInputConfig requires the following parameters for ml_eia2 OutputConfig$TargetDevice.
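A minimal sketch of a compilation job that targets the new ml_eia2 device through the Go SDK; the job name, role ARN, S3 locations, and input shape are placeholder assumptions.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession()))

	// Compile a TensorFlow model for the ml_eia2 target device added in
	// this release. All names, ARNs, and S3 paths are examples.
	out, err := svc.CreateCompilationJob(&sagemaker.CreateCompilationJobInput{
		CompilationJobName: aws.String("example-eia2-compilation"),
		RoleArn:            aws.String("arn:aws:iam::123456789012:role/SageMakerCompilationRole"),
		InputConfig: &sagemaker.InputConfig{
			S3Uri:           aws.String("s3://example-bucket/model/model.tar.gz"),
			Framework:       aws.String("TENSORFLOW"),
			DataInputConfig: aws.String(`{"input": [1, 224, 224, 3]}`),
		},
		OutputConfig: &sagemaker.OutputConfig{
			S3OutputLocation: aws.String("s3://example-bucket/compiled/"),
			TargetDevice:     aws.String("ml_eia2"),
		},
		StoppingCondition: &sagemaker.StoppingCondition{
			MaxRuntimeInSeconds: aws.Int64(900),
		},
	})
	if err != nil {
		fmt.Println("CreateCompilationJob failed:", err)
		return
	}
	fmt.Println("started:", aws.StringValue(out.CompilationJobArn))
}
```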

" } }, "DataProcessing": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 0a1b8a5fc4d..3cdb8e38c68 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -886,9 +886,11 @@ }, "batch" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -4817,6 +4819,36 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "ram-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "ram-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "ram-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "ram-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "ram-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -8807,8 +8839,18 @@ }, "ram" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "ram.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "ram.us-gov-west-1.amazonaws.com" + } } }, "rds" : { diff --git a/service/batch/api.go b/service/batch/api.go index e19c44cf26a..e0a94af1ac4 100644 --- a/service/batch/api.go +++ b/service/batch/api.go @@ -59,9 +59,8 @@ func (c *Batch) CancelJobRequest(input *CancelJobInput) (req *request.Request, o // // Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED, // PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING -// or RUNNING are not canceled (but the API operation still succeeds, even if -// no job is canceled); these jobs must be terminated with the TerminateJob -// operation. +// or RUNNING aren't canceled, but the API operation still succeeds, even if +// no job is canceled. These jobs must be terminated with the TerminateJob operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -152,33 +151,34 @@ func (c *Batch) CreateComputeEnvironmentRequest(input *CreateComputeEnvironmentI // In a managed compute environment, AWS Batch manages the capacity and instance // types of the compute resources within the environment. This is based on the // compute resource specification that you define or the launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) -// that you specify when you create the compute environment. You can choose -// either to use EC2 On-Demand Instances and EC2 Spot Instances, or to use Fargate -// and Fargate Spot capacity in your managed compute environment. You can optionally -// set a maximum price so that Spot Instances only launch when the Spot Instance -// price is less than a specified percentage of the On-Demand price. +// that you specify when you create the compute environment. Either, you can +// choose to use EC2 On-Demand Instances and EC2 Spot Instances. 
Or, you can
+// use Fargate and Fargate Spot capacity in your managed compute environment.
+// You can optionally set a maximum price so that Spot Instances only launch
+// when the Spot Instance price is less than a specified percentage of the On-Demand
+// price.
//
-// Multi-node parallel jobs are not supported on Spot Instances.
+// Multi-node parallel jobs aren't supported on Spot Instances.
//
// In an unmanaged compute environment, you can manage your own EC2 compute
// resources and have a lot of flexibility with how you configure your compute
-// resources. For example, you can use custom AMI. However, you need to verify
-// that your AMI meets the Amazon ECS container instance AMI specification.
+// resources. For example, you can use custom AMIs. However, you must verify
+// that each of your AMIs meets the Amazon ECS container instance AMI specification.
// For more information, see container instance AMIs (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container_instance_AMIs.html)
-// in the Amazon Elastic Container Service Developer Guide. After you have created
+// in the Amazon Elastic Container Service Developer Guide. After you create
// your unmanaged compute environment, you can use the DescribeComputeEnvironments
// operation to find the Amazon ECS cluster that's associated with it. Then,
-// manually launch your container instances into that Amazon ECS cluster. For
-// more information, see Launching an Amazon ECS container instance (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html)
+// launch your container instances into that Amazon ECS cluster. For more information,
+// see Launching an Amazon ECS container instance (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_container_instance.html)
// in the Amazon Elastic Container Service Developer Guide.
//
-// AWS Batch doesn't upgrade the AMIs in a compute environment after it's created.
-// For example, it doesn't update the AMIs when a newer version of the Amazon
-// ECS-optimized AMI is available. Therefore, you're responsible for the management
-// of the guest operating system (including updates and security patches) and
-// any additional application software or utilities that you install on the
-// compute resources. To use a new AMI for your AWS Batch jobs, complete these
-// steps:
+// AWS Batch doesn't upgrade the AMIs in a compute environment after the environment
+// is created. For example, it doesn't update the AMIs when a newer version
+// of the Amazon ECS optimized AMI is available. Therefore, you're responsible
+// for managing the guest operating system (including its updates and security
+// patches) and any additional application software or utilities that you install
+// on the compute resources. To use a new AMI for your AWS Batch jobs, complete
+// these steps:
//
// Create a new compute environment with the new AMI.
//
@@ -274,8 +274,8 @@ func (c *Batch) CreateJobQueueRequest(input *CreateJobQueueInput) (req *request.
// one or more compute environments to the queue and assign an order of preference
// for the compute environments.
//
-// You also set a priority to the job queue that determines the order in which
-// the AWS Batch scheduler places jobs onto its associated compute environments.
+// You also set a priority to the job queue that determines the order that the
+// AWS Batch scheduler places jobs onto its associated compute environments.
// For example, if a compute environment is associated with more than one job
// queue, the job queue with a higher priority is given preference for scheduling
// jobs to that compute environment.
@@ -370,7 +370,7 @@ func (c *Batch) DeleteComputeEnvironmentRequest(input *DeleteComputeEnvironmentI
// any job queues with the UpdateJobQueue API operation. Compute environments
// that use AWS Fargate resources must terminate all active jobs on that compute
// environment before deleting the compute environment. If this isn't done,
-// the compute environment will end up in an invalid state.
+// the compute environment enters an invalid state.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1158,9 +1158,9 @@ func (c *Batch) ListJobsRequest(input *ListJobsInput) (req *request.Request, out
//
// * A job queue ID to return a list of jobs in that job queue
//
-// * A multi-node parallel job ID to return a list of that job's nodes
+// * A multi-node parallel job ID to return a list of nodes for that job
//
-// * An array job ID to return a list of that job's children
+// * An array job ID to return a list of the children for that job
//
// You can filter the results by job status with the jobStatus parameter. If
// you don't specify a status, only RUNNING jobs are returned.
@@ -1469,11 +1469,17 @@ func (c *Batch) SubmitJobRequest(input *SubmitJobInput) (req *request.Request, o
// SubmitJob API operation for AWS Batch.
//
-// Submits an AWS Batch job from a job definition. Parameters specified during
-// SubmitJob override parameters defined in the job definition.
+// Submits an AWS Batch job from a job definition. Parameters that are specified
+// during SubmitJob override parameters defined in the job definition. vCPU
+// and memory requirements that are specified in the ResourceRequirements objects
+// in the job definition are the exception. They can't be overridden this way
+// using the memory and vcpus parameters. Rather, you must specify updates to
+// job definition parameters in a ResourceRequirements object that's included
+// in the containerOverrides parameter.
//
-// Jobs run on Fargate resources don't run for more than 14 days. After 14 days,
-// the Fargate resources might no longer be available and the job is terminated.
+// Jobs that run on Fargate resources can't be guaranteed to run for more than
+// 14 days. This is because, after 14 days, Fargate resources might become unavailable
+// and the job might be terminated.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2521,10 +2527,10 @@ type ComputeResource struct {
//
// AWS Batch selects an instance type that best fits the needs of the jobs with
// a preference for the lowest-cost instance type. If additional instances of
- // the selected instance type aren't available, AWS Batch will wait for the
- // additional instances to be available. If there are not enough instances available,
- // or if the user is hitting Amazon EC2 service limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)
- // then additional jobs aren't run until currently running jobs have completed.
+ // the selected instance type aren't available, AWS Batch waits for the additional
+ // instances to be available.
If there aren't enough instances available, or + // if the user is hitting Amazon EC2 service limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html) + // then additional jobs aren't run until the currently running jobs have completed. // This allocation strategy keeps costs lower but can limit scaling. If you // are using Spot Fleets with BEST_FIT then the Spot Fleet IAM Role must be // specified. @@ -2616,7 +2622,7 @@ type ComputeResource struct { // to launch any instance type within those families (for example, c5 or p3), // or you can specify specific sizes within a family (such as c5.8xlarge). You // can also choose optimal to select instance types (from the C4, M4, and R4 - // instance families) on the fly that match the demand of your job queues. + // instance families) that match the demand of your job queues. // // This parameter isn't applicable to jobs running on Fargate resources, and // shouldn't be specified. @@ -2644,10 +2650,10 @@ type ComputeResource struct { // The maximum number of Amazon EC2 vCPUs that a compute environment can reach. // // With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, - // AWS Batch might need to go above maxvCpus to meet your capacity requirements. - // In this event, AWS Batch will never go above maxvCpus by more than a single - // instance (e.g., no more than a single instance from among those specified - // in your compute environment). + // AWS Batch might need to exceed maxvCpus to meet your capacity requirements. + // In this event, AWS Batch never exceeds maxvCpus by more than a single instance. + // For example, no more than a single instance from among those specified in + // your compute environment is allocated. // // MaxvCpus is a required field MaxvCpus *int64 `locationName:"maxvCpus" type:"integer" required:"true"` @@ -2675,9 +2681,9 @@ type ComputeResource struct { // compute environment. One or more security groups must be specified, either // in securityGroupIds or using a launch template referenced in launchTemplate. // This parameter is required for jobs running on Fargate resources and must - // contain at least one security group. (Fargate does not support launch templates.) + // contain at least one security group. Fargate doesn't support launch templates. // If security groups are specified using both securityGroupIds and launchTemplate, - // the values in securityGroupIds will be used. + // the values in securityGroupIds is used. SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` // The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied @@ -2698,9 +2704,8 @@ type ComputeResource struct { SpotIamFleetRole *string `locationName:"spotIamFleetRole" type:"string"` // The VPC subnets into which the compute resources are launched. These subnets - // must be within the same VPC. This parameter is required for jobs running - // on Fargate resources, where it can contain up to 16 subnets. For more information, - // see VPCs and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + // must be within the same VPC. Fargate compute resources can contain up to + // 16 subnets. For more information, see VPCs and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) // in the Amazon VPC User Guide. // // Subnets is a required field @@ -2713,8 +2718,7 @@ type ComputeResource struct { // your AWS Batch instances in the Amazon EC2 console. 
These tags can't be updated // or removed after the compute environment has been created; any changes require // creating a new compute environment and removing the old compute environment. - // These tags are not seen when using the AWS Batch ListTagsForResource API - // operation. + // These tags aren't seen when using the AWS Batch ListTagsForResource API operation. // // This parameter isn't applicable to jobs running on Fargate resources, and // shouldn't be specified. @@ -2889,10 +2893,10 @@ type ComputeResourceUpdate struct { // The maximum number of Amazon EC2 vCPUs that an environment can reach. // // With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, - // AWS Batch might need to go above maxvCpus to meet your capacity requirements. - // In this event, AWS Batch will never go above maxvCpus by more than a single - // instance (e.g., no more than a single instance from among those specified - // in your compute environment). + // AWS Batch might need to exceed maxvCpus to meet your capacity requirements. + // In this event, AWS Batch never exceeds maxvCpus by more than a single instance. + // That is, no more than a single instance from among those specified in your + // compute environment. MaxvCpus *int64 `locationName:"maxvCpus" type:"integer"` // The minimum number of Amazon EC2 vCPUs that an environment should maintain. @@ -2908,12 +2912,12 @@ type ComputeResourceUpdate struct { // wasn't specified and no change is made. SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` - // The VPC subnets that the compute resources are launched into. This parameter - // is required for jobs running on Fargate compute resources, where it can contain - // up to 16 subnets. For more information, see VPCs and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - // in the Amazon VPC User Guide. This can't be specified for EC2 compute resources. - // Providing an empty list will be handled as if this parameter wasn't specified - // and no change is made. + // The VPC subnets that the compute resources are launched into. Fargate compute + // resources can contain up to 16 subnets. Providing an empty list will be handled + // as if this parameter wasn't specified and no change is made. This can't be + // specified for EC2 compute resources. For more information, see VPCs and Subnets + // (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the + // Amazon VPC User Guide. Subnets []*string `locationName:"subnets" type:"list"` } @@ -3008,7 +3012,7 @@ type ContainerDetail struct { // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/). // By default, containers use the same logging driver that the Docker daemon - // uses. However the container might use a different logging driver than the + // uses. However, the container might use a different logging driver than the // Docker daemon by specifying a log driver with this parameter in the container // definition. To use a different logging driver for a container, the log system // must be configured properly on the container instance. Or, alternatively, @@ -3102,9 +3106,9 @@ type ContainerDetail struct { // and the --user option to docker run (https://docs.docker.com/engine/reference/run/). User *string `locationName:"user" type:"string"` - // The number of vCPUs reserved for the container. 
Jobs running on EC2 resources
- // can specify the vCPU requirement for the job using resourceRequirements but
- // the vCPU requirements can't be specified both here and in the resourceRequirement
+ // The number of vCPUs reserved for the container. For jobs that run on EC2
+ // resources, you can specify the vCPU requirement for the job using resourceRequirements,
+ // but you can't specify the vCPU requirements in both the vcpus and resourceRequirement
// object. This parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/).
@@ -3112,9 +3116,9 @@ type ContainerDetail struct {
// vCPU. This is required but can be specified in several places. It must be
// specified for each node at least once.
//
- // This parameter isn't applicable to jobs running on Fargate resources. Jobs
- // running on Fargate resources must specify the vCPU requirement for the job
- // using resourceRequirements.
+ // This parameter isn't applicable to jobs that run on Fargate resources. For
+ // jobs that run on Fargate resources, you must specify the vCPU requirement
+ // for the job using resourceRequirements.
Vcpus *int64 `locationName:"vcpus" type:"integer"`

// A list of volumes associated with the job.
@@ -3309,10 +3313,14 @@ type ContainerOverrides struct {
// running on Fargate resources and shouldn't be provided.
InstanceType *string `locationName:"instanceType" type:"string"`

- // This parameter is deprecated and not supported for jobs run on Fargate resources,
- // use ResourceRequirement. For jobs run on EC2 resource, the number of MiB
- // of memory reserved for the job. This value overrides the value set in the
- // job definition.
+ // This parameter indicates the amount of memory (in MiB) that's reserved for
+ // the job. It overrides the memory parameter set in the job definition, but
+ // doesn't override any memory requirement specified in the ResourceRequirement
+ // structure in the job definition.
+ //
+ // This parameter is supported for jobs that run on EC2 resources, but isn't
+ // supported for jobs that run on Fargate resources. For these resources, use
+ // resourceRequirement instead.
//
// Deprecated: This field is deprecated, use resourceRequirements instead.
Memory *int64 `locationName:"memory" deprecated:"true" type:"integer"`
@@ -3322,21 +3330,25 @@ type ContainerOverrides struct {
// MEMORY, and VCPU.
ResourceRequirements []*ResourceRequirement `locationName:"resourceRequirements" type:"list"`

- // This parameter is deprecated and not supported for jobs run on Fargate resources,
- // see resourceRequirement. For jobs run on EC2 resources, the number of vCPUs
- // to reserve for the container. This value overrides the value set in the job
- // definition. Jobs run on EC2 resources can specify the vCPU requirement using
- // resourceRequirement but the vCPU requirements can't be specified both here
- // and in resourceRequirement. This parameter maps to CpuShares in the Create
- // a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
+ // This parameter indicates the number of vCPUs reserved for the container. It
+ // overrides the vcpus parameter that's set in the job definition, but doesn't
+ // override any vCPU requirement specified in the resourceRequirement structure
+ // in the job definition.
+ // + // This parameter is supported for jobs that run on EC2 resources, but isn't + // supported for jobs that run on Fargate resources. For Fargate resources, + // you can only use resourceRequirement. For EC2 resources, you can use either + // this parameter or resourceRequirement but not both. + // + // This parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/). // Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one // vCPU. // - // This parameter isn't applicable to jobs running on Fargate resources and - // shouldn't be provided. Jobs running on Fargate resources must specify the - // vCPU requirement for the job using resourceRequirements. + // This parameter isn't applicable to jobs that run on Fargate resources and + // shouldn't be provided. For jobs that run on Fargate resources, you must specify + // the vCPU requirement for the job using resourceRequirements. // // Deprecated: This field is deprecated, use resourceRequirements instead. Vcpus *int64 `locationName:"vcpus" deprecated:"true" type:"integer"` @@ -3434,8 +3446,8 @@ type ContainerProperties struct { Environment []*KeyValuePair `locationName:"environment" type:"list"` // The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. - // Jobs running on Fargate resources must provide an execution role. For more - // information, see AWS Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) + // For jobs that run on Fargate resources, you must provide an execution role. + // For more information, see AWS Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) // in the AWS Batch User Guide. ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` @@ -3473,7 +3485,7 @@ type ContainerProperties struct { // a multi-node parallel job must use the same instance type. // // This parameter isn't applicable to single-node container jobs or for jobs - // running on Fargate resources and shouldn't be provided. + // that run on Fargate resources and shouldn't be provided. InstanceType *string `locationName:"instanceType" type:"string"` // The Amazon Resource Name (ARN) of the IAM role that the container can assume @@ -3515,17 +3527,19 @@ type ContainerProperties struct { // in the Amazon Elastic Container Service Developer Guide. LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` - // This parameter is deprecated and not supported for jobs run on Fargate resources, - // use ResourceRequirement. For jobs run on EC2 resources can specify the memory - // requirement using the ResourceRequirement structure. The hard limit (in MiB) - // of memory to present to the container. If your container attempts to exceed - // the memory specified here, the container is killed. This parameter maps to - // Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) + // This parameter indicates the memory hard limit (in MiB) for a container. + // If your container attempts to exceed the specified number, it is terminated. + // You must specify at least 4 MiB of memory for a job using this parameter. + // The memory hard limit can be specified in several places. 
It must be specified
+	// for each node at least once.
+	//
+	// This parameter maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
 	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
 	// and the --memory option to docker run (https://docs.docker.com/engine/reference/run/).
-	// You must specify at least 4 MiB of memory for a job. This is required but
-	// can be specified in several places; it must be specified for each node at
-	// least once.
+	//
+	// This parameter is supported on EC2 resources but isn't supported on Fargate
+	// resources. For Fargate resources, you should specify the memory requirement
+	// using resourceRequirement. You can also use resourceRequirement for EC2
+	// resources.
 	//
 	// If you're trying to maximize your resource utilization by providing your
 	// jobs as much memory as possible for a particular instance type, see Memory
@@ -3587,21 +3601,21 @@ type ContainerProperties struct {
 	// and the --user option to docker run (https://docs.docker.com/engine/reference/run/).
 	User *string `locationName:"user" type:"string"`

-	// This parameter is deprecated and not supported for jobs run on Fargate resources,
-	// see resourceRequirement. The number of vCPUs reserved for the container.
-	// Jobs running on EC2 resources can specify the vCPU requirement for the job
-	// using resourceRequirements but the vCPU requirements can't be specified both
-	// here and in the resourceRequirement structure. This parameter maps to CpuShares
-	// in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
+	// The number of vCPUs reserved for the job. Each vCPU is equivalent to 1,024
+	// CPU shares. This parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
 	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
 	// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/).
-	// Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one
-	// vCPU. This is required but can be specified in several places. It must be
-	// specified for each node at least once.
+	// The number of vCPUs is required, but it can be specified in several places.
+	// You must specify it at least once for each node.
+	//
+	// This parameter is supported on EC2 resources but isn't supported for jobs
+	// that run on Fargate resources. For these resources, use resourceRequirement
+	// instead. You can use either this parameter or the resourceRequirements structure,
+	// but not both.
 	//
 	// This parameter isn't applicable to jobs running on Fargate resources and
-	// shouldn't be provided. Jobs running on Fargate resources must specify the
-	// vCPU requirement for the job using resourceRequirements.
+	// shouldn't be provided. For jobs that run on Fargate resources, you must specify
+	// the vCPU requirement for the job using resourceRequirements.
 	//
 	// Deprecated: This field is deprecated, use resourceRequirements instead.
 	Vcpus *int64 `locationName:"vcpus" deprecated:"true" type:"integer"`
@@ -3845,18 +3859,25 @@ type CreateComputeEnvironmentInput struct {
 	// see AWS Batch service IAM role (https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html)
 	// in the AWS Batch User Guide.
 	//
-	// If your specified role has a path other than /, then you must either specify
-	// the full role ARN (this is recommended) or prefix the role name with the
-	// path.
+ // If your account has already created the AWS Batch service-linked role, that + // role is used by default for your compute environment unless you specify a + // role here. If the AWS Batch service-linked role does not exist in your account, + // and no role is specified here, the service will try to create the AWS Batch + // service-linked role in your account. + // + // If your specified role has a path other than /, then you must specify either + // the full role ARN (recommended) or prefix the role name with the path. For + // example, if a role with the name bar has a path of /foo/ then you would specify + // /foo/bar as the role name. For more information, see Friendly names and paths + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) + // in the IAM User Guide. // // Depending on how you created your AWS Batch service role, its ARN might contain // the service-role path prefix. When you only specify the name of the service // role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. // Because of this, we recommend that you specify the full ARN of your service // role when you create compute environments. - // - // ServiceRole is a required field - ServiceRole *string `locationName:"serviceRole" type:"string" required:"true"` + ServiceRole *string `locationName:"serviceRole" type:"string"` // The state of the compute environment. If the state is ENABLED, then the compute // environment accepts jobs from a queue and can scale out automatically based @@ -3908,9 +3929,6 @@ func (s *CreateComputeEnvironmentInput) Validate() error { if s.ComputeEnvironmentName == nil { invalidParams.Add(request.NewErrParamRequired("ComputeEnvironmentName")) } - if s.ServiceRole == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceRole")) - } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } @@ -4417,11 +4435,11 @@ type DescribeJobDefinitionsInput struct { // The maximum number of results returned by DescribeJobDefinitions in paginated // output. When this parameter is used, DescribeJobDefinitions only returns - // maxResults results in a single page along with a nextToken response element. - // The remaining results of the initial request can be seen by sending another - // DescribeJobDefinitions request with the returned nextToken value. This value - // can be between 1 and 100. If this parameter isn't used, then DescribeJobDefinitions - // returns up to 100 results and a nextToken value if applicable. + // maxResults results in a single page and a nextToken response element. The + // remaining results of the initial request can be seen by sending another DescribeJobDefinitions + // request with the returned nextToken value. This value can be between 1 and + // 100. If this parameter isn't used, then DescribeJobDefinitions returns up + // to 100 results and a nextToken value if applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` // The nextToken value returned from a previous paginated DescribeJobDefinitions @@ -4523,7 +4541,7 @@ type DescribeJobQueuesInput struct { // The maximum number of results returned by DescribeJobQueues in paginated // output. When this parameter is used, DescribeJobQueues only returns maxResults - // results in a single page along with a nextToken response element. The remaining + // results in a single page and a nextToken response element. 
The remaining // results of the initial request can be seen by sending another DescribeJobQueues // request with the returned nextToken value. This value can be between 1 and // 100. If this parameter isn't used, then DescribeJobQueues returns up to 100 @@ -4673,7 +4691,7 @@ func (s *DescribeJobsOutput) SetJobs(v []*JobDetail) *DescribeJobsOutput { type Device struct { _ struct{} `type:"structure"` - // The path inside the container used to expose the host device. By default + // The path inside the container used to expose the host device. By default, // the hostPath value is used. ContainerPath *string `locationName:"containerPath" type:"string"` @@ -4815,29 +4833,29 @@ type EvaluateOnExit struct { _ struct{} `type:"structure"` // Specifies the action to take if all of the specified conditions (onStatusReason, - // onReason, and onExitCode) are met. The values are not case sensitive. + // onReason, and onExitCode) are met. The values aren't case sensitive. // // Action is a required field Action *string `locationName:"action" type:"string" required:"true" enum:"RetryAction"` // Contains a glob pattern to match against the decimal representation of the - // ExitCode returned for a job. The patten can be up to 512 characters long, + // ExitCode returned for a job. The pattern can be up to 512 characters long, // can contain only numbers, and can optionally end with an asterisk (*) so // that only the start of the string needs to be an exact match. OnExitCode *string `locationName:"onExitCode" type:"string"` // Contains a glob pattern to match against the Reason returned for a job. The - // patten can be up to 512 characters long, can contain letters, numbers, periods - // (.), colons (:), and white space (spaces, tabs), and can optionally end with - // an asterisk (*) so that only the start of the string needs to be an exact - // match. + // pattern can be up to 512 characters long, and can contain letters, numbers, + // periods (.), colons (:), and white space (including spaces and tabs). It + // can optionally end with an asterisk (*) so that only the start of the string + // needs to be an exact match. OnReason *string `locationName:"onReason" type:"string"` // Contains a glob pattern to match against the StatusReason returned for a - // job. The patten can be up to 512 characters long, can contain letters, numbers, - // periods (.), colons (:), and white space (spaces, tabs). and can optionally - // end with an asterisk (*) so that only the start of the string needs to be - // an exact match. + // job. The pattern can be up to 512 characters long, and can contain letters, + // numbers, periods (.), colons (:), and white space (including spaces or tabs). + // It can optionally end with an asterisk (*) so that only the start of the + // string needs to be an exact match. OnStatusReason *string `locationName:"onStatusReason" type:"string"` } @@ -4888,16 +4906,16 @@ func (s *EvaluateOnExit) SetOnStatusReason(v string) *EvaluateOnExit { return s } -// The platform configuration for jobs running on Fargate resources. Jobs running -// on EC2 resources must not specify this parameter. +// The platform configuration for jobs running on Fargate resources. For jobs +// that run on EC2 resources, you shouldn't specify this parameter. type FargatePlatformConfiguration struct { _ struct{} `type:"structure"` - // The AWS Fargate platform version on which the jobs are running. A platform - // version is specified only for jobs running on Fargate resources. 
If one isn't - // specified, the LATEST platform version is used by default. This will use - // a recent, approved version of the AWS Fargate platform for compute resources. - // For more information, see AWS Fargate platform versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) + // The AWS Fargate platform version where the jobs are running. A platform version + // is specified only for jobs running on Fargate resources. If one isn't specified, + // the LATEST platform version is used by default. This uses a recent, approved + // version of the AWS Fargate platform for compute resources. For more information, + // see AWS Fargate platform versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) // in the Amazon Elastic Container Service Developer Guide. PlatformVersion *string `locationName:"platformVersion" type:"string"` } @@ -4929,11 +4947,11 @@ type Host struct { // If this parameter is empty, then the Docker daemon has assigned a host path // for you. If this parameter contains a file location, then the data volume // persists at the specified location on the host container instance until you - // delete it manually. If the source path location does not exist on the host + // delete it manually. If the source path location doesn't exist on the host // container instance, the Docker daemon creates it. If the location does exist, // the contents of the source path folder are exported. // - // This parameter isn't applicable to jobs running on Fargate resources and + // This parameter isn't applicable to jobs that run on Fargate resources and // shouldn't be provided. SourcePath *string `locationName:"sourcePath" type:"string"` } @@ -5213,8 +5231,8 @@ type JobDetail struct { PlatformCapabilities []*string `locationName:"platformCapabilities" type:"list"` // Specifies whether to propagate the tags from the job or job definition to - // the corresponding Amazon ECS task. If no value is specified, the tags are - // not propagated. Tags can only be propagated to the tasks during task creation. + // the corresponding Amazon ECS task. If no value is specified, the tags aren't + // propagated. Tags can only be propagated to the tasks during task creation. // For tags with the same name, job tags are given priority over job definitions // tags. If the total number of combined tags from the job and job definition // is over 50, the job is moved to the FAILED state. @@ -5424,7 +5442,7 @@ type JobQueueDetail struct { // for example, a job queue with a priority value of 10 is given scheduling // preference over a job queue with a priority value of 1. All of the compute // environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); - // EC2 and Fargate compute environments cannot be mixed. + // EC2 and Fargate compute environments can't be mixed. // // Priority is a required field Priority *int64 `locationName:"priority" type:"integer" required:"true"` @@ -5818,7 +5836,7 @@ type LinuxParameters struct { // EC2 resources. // // * If the maxSwap and swappiness parameters are omitted from a job definition, - // each container will have a default swappiness value of 60 and the total + // each container will have a default swappiness value of 60, and the total // swap usage will be limited to two times the memory reservation of the // container. // @@ -5928,11 +5946,10 @@ type ListJobsInput struct { // The maximum number of results returned by ListJobs in paginated output. 
When
 	// this parameter is used, ListJobs only returns maxResults results in a single
-	// page along with a nextToken response element. The remaining results of the
-	// initial request can be seen by sending another ListJobs request with the
-	// returned nextToken value. This value can be between 1 and 100. If this parameter
-	// isn't used, then ListJobs returns up to 100 results and a nextToken value
-	// if applicable.
+	// page and a nextToken response element. The remaining results of the initial
+	// request can be seen by sending another ListJobs request with the returned
+	// nextToken value. This value can be between 1 and 100. If this parameter isn't
+	// used, then ListJobs returns up to 100 results and a nextToken value if applicable.
 	MaxResults *int64 `locationName:"maxResults" type:"integer"`

 	// The job ID for a multi-node parallel job. Specifying a multi-node parallel
@@ -6157,7 +6174,7 @@ type LogConfiguration struct {
 	// and options, see Syslog logging driver (https://docs.docker.com/config/containers/logging/syslog/)
 	// in the Docker documentation.
 	//
-	// If you have a custom driver that'sn't listed earlier that you want to work
+	// If you have a custom driver that's not listed earlier that you want to work
 	// with the Amazon ECS container agent, you can fork the Amazon ECS container
 	// agent project that's available on GitHub (https://github.com/aws/amazon-ecs-agent)
 	// and customize it to work with that driver. We encourage you to submit pull
@@ -6953,10 +6970,10 @@ type ResourceRequirement struct {
 	//
 	// type="MEMORY"
 	//
-	// For jobs running on EC2 resources, the hard limit (in MiB) of memory to present
-	// to the container. If your container attempts to exceed the memory specified
-	// here, the container is killed. This parameter maps to Memory in the Create
-	// a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
+	// The memory hard limit (in MiB) presented to the container. This parameter
+	// is supported for jobs running on EC2 resources. If your container attempts
+	// to exceed the memory specified, the container is terminated. This parameter
+	// maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
 	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
 	// and the --memory option to docker run (https://docs.docker.com/engine/reference/run/).
 	// You must specify at least 4 MiB of memory for a job. This is required but
 	// can be specified in several places. It must be specified for each node at
 	// least once.
@@ -7174,7 +7191,7 @@ type Secret struct {
 	// in the AWS Systems Manager Parameter Store.
 	//
 	// If the AWS Systems Manager Parameter Store parameter exists in the same Region
-	// as the job you are launching, then you can use either the full ARN or name
+	// as the job you're launching, then you can use either the full ARN or name
 	// of the parameter. If the parameter exists in a different Region, then the
 	// full ARN must be specified.
 	//
@@ -7287,13 +7304,12 @@ type SubmitJobInput struct {
 	// AWS Batch User Guide.
 	ArrayProperties *ArrayProperties `locationName:"arrayProperties" type:"structure"`

-	// A list of container overrides in JSON format that specify the name of a container
-	// in the specified job definition and the overrides it should receive. You
-	// can override the default command for a container (that's specified in the
-	// job definition or the Docker image) with a command override.
You can also - // override existing environment variables (that are specified in the job definition - // or Docker image) on a container or add new environment variables to it with - // an environment override. + // A list of container overrides in the JSON format that specify the name of + // a container in the specified job definition and the overrides it should receive. + // You can override the default command for a container, which is specified + // in the job definition or the Docker image, with a command override. You can + // also override existing environment variables on a container or add new environment + // variables to it with an environment override. ContainerOverrides *ContainerOverrides `locationName:"containerOverrides" type:"structure"` // A list of dependencies for the job. A job can depend upon a maximum of 20 @@ -7319,8 +7335,8 @@ type SubmitJobInput struct { // JobName is a required field JobName *string `locationName:"jobName" type:"string" required:"true"` - // The job queue into which the job is submitted. You can specify either the - // name or the Amazon Resource Name (ARN) of the queue. + // The job queue where the job is submitted. You can specify either the name + // or the Amazon Resource Name (ARN) of the queue. // // JobQueue is a required field JobQueue *string `locationName:"jobQueue" type:"string" required:"true"` @@ -7922,9 +7938,9 @@ type UpdateComputeEnvironmentInput struct { // // Depending on how you created your AWS Batch service role, its ARN might contain // the service-role path prefix. When you only specify the name of the service - // role, AWS Batch assumes that your ARN does not use the service-role path - // prefix. Because of this, we recommend that you specify the full ARN of your - // service role when you create compute environments. + // role, AWS Batch assumes that your ARN doesn't use the service-role path prefix. + // Because of this, we recommend that you specify the full ARN of your service + // role when you create compute environments. ServiceRole *string `locationName:"serviceRole" type:"string"` // The state of the compute environment. Compute environments in the ENABLED @@ -8033,7 +8049,7 @@ type UpdateJobQueueInput struct { // to determine which compute environment should run a given job. Compute environments // must be in the VALID state before you can associate them with a job queue. // All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate - // (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be + // (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be // mixed. // // All compute environments that are associated with a job queue must share @@ -8051,14 +8067,13 @@ type UpdateJobQueueInput struct { // with the same compute environment. Priority is determined in descending order, // for example, a job queue with a priority value of 10 is given scheduling // preference over a job queue with a priority value of 1. All of the compute - // environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); - // EC2 and Fargate compute environments cannot be mixed. + // environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). + // EC2 and Fargate compute environments can't be mixed. Priority *int64 `locationName:"priority" type:"integer"` // Describes the queue's ability to accept new jobs. If the job queue state - // is ENABLED, it is able to accept jobs. 
If the job queue state is DISABLED, - // new jobs cannot be added to the queue, but jobs already in the queue can - // finish. + // is ENABLED, it can accept jobs. If the job queue state is DISABLED, new jobs + // can't be added to the queue, but jobs already in the queue can finish. State *string `locationName:"state" type:"string" enum:"JQState"` } diff --git a/service/batch/doc.go b/service/batch/doc.go index 1722344addc..50b23d93b88 100644 --- a/service/batch/doc.go +++ b/service/batch/doc.go @@ -5,13 +5,13 @@ // // Using AWS Batch, you can run batch computing workloads on the AWS Cloud. // Batch computing is a common means for developers, scientists, and engineers -// to access large amounts of compute resources. AWS Batch utilizes the advantages +// to access large amounts of compute resources. AWS Batch uses the advantages // of this computing workload to remove the undifferentiated heavy lifting of -// configuring and managing required infrastructure, while also adopting a familiar -// batch computing software approach. Given these advantages, AWS Batch can -// help you to efficiently provision resources in response to jobs submitted, -// thus effectively helping to eliminate capacity constraints, reduce compute -// costs, and deliver your results more quickly. +// configuring and managing required infrastructure. At the same time, it also +// adopts a familiar batch computing software approach. Given these advantages, +// AWS Batch can help you to efficiently provision resources in response to +// jobs submitted, thus effectively helping you to eliminate capacity constraints, +// reduce compute costs, and deliver your results more quickly. // // As a fully managed service, AWS Batch can run batch computing workloads of // any scale. AWS Batch automatically provisions compute resources and optimizes diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index 4be26f09ed7..0216e7ebd6b 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -51422,6 +51422,22 @@ type InputConfig struct { // "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}] "CompilerOptions": // {"class_labels": "imagenet_labels_1000.txt"} // + // Depending on the model format, DataInputConfig requires the following parameters + // for ml_eia2 OutputConfig:TargetDevice (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice). + // + // * For TensorFlow models saved in the SavedModel format, specify the input + // names from signature_def_key and the input model shapes for DataInputConfig. + // Specify the signature_def_key in OutputConfig:CompilerOptions (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions) + // if the model does not use TensorFlow's default signature def key. For + // example: "DataInputConfig": {"inputs": [1, 224, 224, 3]} "CompilerOptions": + // {"signature_def_key": "serving_custom"} + // + // * For TensorFlow models saved as a frozen graph, specify the input tensor + // names and shapes in DataInputConfig and the output tensor names for output_names + // in OutputConfig:CompilerOptions (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions). 
+	// For example: "DataInputConfig": {"input_tensor:0": [1, 224, 224, 3]} "CompilerOptions":
+	// {"output_names": ["output_tensor:0"]}
+	//
 	// DataInputConfig is a required field
 	DataInputConfig *string `min:"1" type:"string" required:"true"`

@@ -64289,6 +64305,15 @@ type OutputConfig struct {
 	// labels file name inside input tar.gz file. For example, {"class_labels":
 	// "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated
 	// by newlines.
+	//
+	//    * EIA: Compilation for the Elastic Inference Accelerator supports the
+	//    following compiler options: precision_mode: Specifies the precision of
+	//    compiled artifacts. Supported values are "FP16" and "FP32". Default is
+	//    "FP32". signature_def_key: Specifies the signature to use for models in
+	//    SavedModel format. Default is TensorFlow's default signature def key.
+	//    output_names: Specifies a list of output tensor names for models in FrozenGraph
+	//    format. Set at most one of these two fields: signature_def_key or output_names.
+	//    For example: {"precision_mode": "FP32", "output_names": ["output:0"]}
 	CompilerOptions *string `min:"3" type:"string"`

 	// The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to
@@ -80608,6 +80633,9 @@ const (
 	// TargetDeviceMlInf1 is a TargetDevice enum value
 	TargetDeviceMlInf1 = "ml_inf1"

+	// TargetDeviceMlEia2 is a TargetDevice enum value
+	TargetDeviceMlEia2 = "ml_eia2"
+
 	// TargetDeviceJetsonTx1 is a TargetDevice enum value
 	TargetDeviceJetsonTx1 = "jetson_tx1"

@@ -80678,6 +80706,7 @@ func TargetDevice_Values() []string {
 		TargetDeviceMlP3,
 		TargetDeviceMlG4dn,
 		TargetDeviceMlInf1,
+		TargetDeviceMlEia2,
 		TargetDeviceJetsonTx1,
 		TargetDeviceJetsonTx2,
 		TargetDeviceJetsonNano,
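
Below are a few usage sketches for the changes in this release. They are illustrative only and not part of the patch; the region, names, ARNs, subnet, and security group values are placeholders. First, creating a managed compute environment without serviceRole, which this release makes optional; AWS Batch then creates (or reuses) its service-linked role:

```go
// Sketch: create a managed Fargate compute environment with ServiceRole omitted.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/batch"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := batch.New(sess)

	out, err := svc.CreateComputeEnvironment(&batch.CreateComputeEnvironmentInput{
		ComputeEnvironmentName: aws.String("example-fargate-ce"), // placeholder
		Type:                   aws.String("MANAGED"),
		State:                  aws.String("ENABLED"),
		// ServiceRole is deliberately omitted: as of this release, AWS Batch
		// falls back to the service-linked role.
		ComputeResources: &batch.ComputeResource{
			Type:             aws.String("FARGATE"),
			MaxvCpus:         aws.Int64(16),
			Subnets:          []*string{aws.String("subnet-0123456789abcdef0")}, // placeholder
			SecurityGroupIds: []*string{aws.String("sg-0123456789abcdef0")},     // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", aws.StringValue(out.ComputeEnvironmentArn))
}
```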
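The overrides documentation above deprecates the top-level vcpus and memory fields in favor of resourceRequirements. A minimal sketch of submitting a job that expresses both requirements that way; the queue and job definition names are placeholders:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/batch"
)

// submitJob submits a job whose vCPU and memory come from resourceRequirements
// rather than the deprecated vcpus/memory override fields.
func submitJob(svc *batch.Batch) (string, error) {
	out, err := svc.SubmitJob(&batch.SubmitJobInput{
		JobName:       aws.String("example-job"),
		JobQueue:      aws.String("example-queue"), // placeholder: name or ARN
		JobDefinition: aws.String("example-def:1"), // placeholder
		ContainerOverrides: &batch.ContainerOverrides{
			Command: []*string{aws.String("echo"), aws.String("hello")},
			ResourceRequirements: []*batch.ResourceRequirement{
				{Type: aws.String("VCPU"), Value: aws.String("2")},
				{Type: aws.String("MEMORY"), Value: aws.String("4096")}, // MiB, as a string
			},
		},
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.JobId), nil
}
```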
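DescribeJobDefinitions, DescribeJobQueues, and ListJobs all share the maxResults/nextToken pagination contract described above. A sketch using the SDK's generated ListJobsPages helper, which resubmits the returned nextToken until the results are exhausted:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/batch"
)

// runningJobIDs collects every RUNNING job ID in a queue across pages.
func runningJobIDs(svc *batch.Batch, queue string) ([]string, error) {
	var ids []string
	err := svc.ListJobsPages(&batch.ListJobsInput{
		JobQueue:   aws.String(queue),
		JobStatus:  aws.String("RUNNING"),
		MaxResults: aws.Int64(100), // must be between 1 and 100
	}, func(page *batch.ListJobsOutput, lastPage bool) bool {
		for _, j := range page.JobSummaryList {
			ids = append(ids, aws.StringValue(j.JobId))
		}
		return true // keep paging until nextToken is exhausted
	})
	return ids, err
}
```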
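The EvaluateOnExit fields above match glob patterns against a job's exit code, reason, and status reason. A sketch of a job definition whose retry policy retries on host-level failures and exits otherwise; the definition name, image, and the "Host EC2*" pattern are illustrative assumptions, not values mandated by the API:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/batch"
)

// registerWithRetry registers a container job definition that uses
// EvaluateOnExit glob patterns to control retries.
func registerWithRetry(svc *batch.Batch) error {
	_, err := svc.RegisterJobDefinition(&batch.RegisterJobDefinitionInput{
		JobDefinitionName: aws.String("example-def"), // placeholder
		Type:              aws.String("container"),
		ContainerProperties: &batch.ContainerProperties{
			Image:   aws.String("public.ecr.aws/amazonlinux/amazonlinux:latest"), // placeholder
			Command: []*string{aws.String("true")},
			ResourceRequirements: []*batch.ResourceRequirement{
				{Type: aws.String("VCPU"), Value: aws.String("1")},
				{Type: aws.String("MEMORY"), Value: aws.String("2048")},
			},
		},
		RetryStrategy: &batch.RetryStrategy{
			Attempts: aws.Int64(3),
			EvaluateOnExit: []*batch.EvaluateOnExit{
				// Patterns match the start of the string; a trailing * is optional.
				{Action: aws.String("RETRY"), OnStatusReason: aws.String("Host EC2*")},
				{Action: aws.String("EXIT"), OnReason: aws.String("*")},
			},
		},
	})
	return err
}
```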
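Finally, for the new ml_eia2 target device, the SageMaker documentation above gives the DataInputConfig and CompilerOptions shapes for TensorFlow SavedModel input. A sketch of the corresponding CreateCompilationJob call; the job name, role ARN, and S3 locations are placeholders, and signature_def_key is only needed when the model doesn't use TensorFlow's default signature def key:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// compileForEIA2 compiles a TensorFlow SavedModel for the new ml_eia2 target.
func compileForEIA2(svc *sagemaker.SageMaker) error {
	_, err := svc.CreateCompilationJob(&sagemaker.CreateCompilationJobInput{
		CompilationJobName: aws.String("tf-savedmodel-eia2"),                           // placeholder
		RoleArn:            aws.String("arn:aws:iam::123456789012:role/SageMakerRole"), // placeholder
		InputConfig: &sagemaker.InputConfig{
			S3Uri:     aws.String("s3://example-bucket/model.tar.gz"), // placeholder
			Framework: aws.String("TENSORFLOW"),
			// SavedModel format: input names and shapes.
			DataInputConfig: aws.String(`{"inputs": [1, 224, 224, 3]}`),
		},
		OutputConfig: &sagemaker.OutputConfig{
			S3OutputLocation: aws.String("s3://example-bucket/compiled/"), // placeholder
			TargetDevice:     aws.String(sagemaker.TargetDeviceMlEia2),
			// signature_def_key applies to SavedModel input; don't combine it
			// with output_names (set at most one of the two).
			CompilerOptions: aws.String(`{"precision_mode": "FP32", "signature_def_key": "serving_custom"}`),
		},
		StoppingCondition: &sagemaker.StoppingCondition{
			MaxRuntimeInSeconds: aws.Int64(900),
		},
	})
	return err
}
```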