From 00737e53d6271118d081ced7ec44d1e51e23b073 Mon Sep 17 00:00:00 2001 From: awstools Date: Tue, 17 Dec 2024 19:11:28 +0000 Subject: [PATCH] feat(client-batch): This feature allows AWS Batch on Amazon EKS to support configuration of Pod Annotations, overriding Namespace on which the Batch job's Pod runs on, and allows Subpath and Persistent Volume claim to be set for AWS Batch on Amazon EKS jobs. --- .../CreateComputeEnvironmentCommand.ts | 8 +- .../commands/DescribeJobDefinitionsCommand.ts | 20 ++ .../src/commands/DescribeJobsCommand.ts | 20 ++ .../commands/RegisterJobDefinitionCommand.ts | 20 ++ .../src/commands/SubmitJobCommand.ts | 8 + clients/client-batch/src/models/models_0.ts | 231 +++++++++++------- .../src/protocols/Aws_restJson1.ts | 9 + codegen/sdk-codegen/aws-models/batch.json | 99 ++++++-- 8 files changed, 300 insertions(+), 115 deletions(-) diff --git a/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts b/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts index a353a128a046..b6a1915faed7 100644 --- a/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts +++ b/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts @@ -94,11 +94,9 @@ export interface CreateComputeEnvironmentCommandOutput extends CreateComputeEnvi * *
  • *

 *   Set the update to latest image version (updateToLatestImageVersion)
- *   parameter to
- *   true.
- *   The updateToLatestImageVersion parameter is used when you update a compute
- *   environment. This parameter is ignored when you create a compute
- *   environment.

+ *   parameter to true. The updateToLatestImageVersion parameter
+ *   is used when you update a compute environment. This parameter is ignored when you create
+ *   a compute environment.

    *
  • *
  • *

    Don't specify an AMI ID in imageId, imageIdOverride (in diff --git a/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts b/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts index 5561dbd93af8..e73e12aa9a3c 100644 --- a/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts +++ b/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts @@ -375,6 +375,7 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti * // { // EksContainerVolumeMount * // name: "STRING_VALUE", * // mountPath: "STRING_VALUE", + * // subPath: "STRING_VALUE", * // readOnly: true || false, * // }, * // ], @@ -413,6 +414,7 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti * // { * // name: "STRING_VALUE", * // mountPath: "STRING_VALUE", + * // subPath: "STRING_VALUE", * // readOnly: true || false, * // }, * // ], @@ -440,12 +442,20 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti * // secretName: "STRING_VALUE", // required * // optional: true || false, * // }, + * // persistentVolumeClaim: { // EksPersistentVolumeClaim + * // claimName: "STRING_VALUE", // required + * // readOnly: true || false, + * // }, * // }, * // ], * // metadata: { // EksMetadata * // labels: { // EksLabelsMap * // "": "STRING_VALUE", * // }, + * // annotations: { // EksAnnotationsMap + * // "": "STRING_VALUE", + * // }, + * // namespace: "STRING_VALUE", * // }, * // shareProcessNamespace: true || false, * // }, @@ -535,6 +545,7 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti * // { * // name: "STRING_VALUE", * // mountPath: "STRING_VALUE", + * // subPath: "STRING_VALUE", * // readOnly: true || false, * // }, * // ], @@ -573,6 +584,7 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti * // { * // name: "STRING_VALUE", * // mountPath: "STRING_VALUE", + * // subPath: "STRING_VALUE", * // readOnly: true || false, * // }, * // ], @@ -600,12 +612,20 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti * // secretName: "STRING_VALUE", // required * // optional: true || false, * // }, + * // persistentVolumeClaim: { + * // claimName: "STRING_VALUE", // required + * // readOnly: true || false, + * // }, * // }, * // ], * // metadata: { * // labels: { * // "": "STRING_VALUE", * // }, + * // annotations: { + * // "": "STRING_VALUE", + * // }, + * // namespace: "STRING_VALUE", * // }, * // shareProcessNamespace: true || false, * // }, diff --git a/clients/client-batch/src/commands/DescribeJobsCommand.ts b/clients/client-batch/src/commands/DescribeJobsCommand.ts index 6f3062a30071..0cd036e1750f 100644 --- a/clients/client-batch/src/commands/DescribeJobsCommand.ts +++ b/clients/client-batch/src/commands/DescribeJobsCommand.ts @@ -431,6 +431,7 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // { // EksContainerVolumeMount * // name: "STRING_VALUE", * // mountPath: "STRING_VALUE", + * // subPath: "STRING_VALUE", * // readOnly: true || false, * // }, * // ], @@ -469,6 +470,7 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // { * // name: "STRING_VALUE", * // mountPath: "STRING_VALUE", + * // subPath: "STRING_VALUE", * // readOnly: true || false, * // }, * // ], @@ -496,12 +498,20 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // secretName: "STRING_VALUE", // required * // optional: true || false, * // 
}, + * // persistentVolumeClaim: { // EksPersistentVolumeClaim + * // claimName: "STRING_VALUE", // required + * // readOnly: true || false, + * // }, * // }, * // ], * // metadata: { // EksMetadata * // labels: { // EksLabelsMap * // "": "STRING_VALUE", * // }, + * // annotations: { // EksAnnotationsMap + * // "": "STRING_VALUE", + * // }, + * // namespace: "STRING_VALUE", * // }, * // shareProcessNamespace: true || false, * // }, @@ -563,6 +573,7 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // { * // name: "STRING_VALUE", * // mountPath: "STRING_VALUE", + * // subPath: "STRING_VALUE", * // readOnly: true || false, * // }, * // ], @@ -603,6 +614,7 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // { * // name: "STRING_VALUE", * // mountPath: "STRING_VALUE", + * // subPath: "STRING_VALUE", * // readOnly: true || false, * // }, * // ], @@ -630,6 +642,10 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // secretName: "STRING_VALUE", // required * // optional: true || false, * // }, + * // persistentVolumeClaim: { + * // claimName: "STRING_VALUE", // required + * // readOnly: true || false, + * // }, * // }, * // ], * // podName: "STRING_VALUE", @@ -638,6 +654,10 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // labels: { * // "": "STRING_VALUE", * // }, + * // annotations: { + * // "": "STRING_VALUE", + * // }, + * // namespace: "STRING_VALUE", * // }, * // shareProcessNamespace: true || false, * // }, diff --git a/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts b/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts index 2446056be25a..44c5c6638dc3 100644 --- a/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts +++ b/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts @@ -344,6 +344,7 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio * { // EksContainerVolumeMount * name: "STRING_VALUE", * mountPath: "STRING_VALUE", + * subPath: "STRING_VALUE", * readOnly: true || false, * }, * ], @@ -382,6 +383,7 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio * { * name: "STRING_VALUE", * mountPath: "STRING_VALUE", + * subPath: "STRING_VALUE", * readOnly: true || false, * }, * ], @@ -409,12 +411,20 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio * secretName: "STRING_VALUE", // required * optional: true || false, * }, + * persistentVolumeClaim: { // EksPersistentVolumeClaim + * claimName: "STRING_VALUE", // required + * readOnly: true || false, + * }, * }, * ], * metadata: { // EksMetadata * labels: { // EksLabelsMap * "": "STRING_VALUE", * }, + * annotations: { // EksAnnotationsMap + * "": "STRING_VALUE", + * }, + * namespace: "STRING_VALUE", * }, * shareProcessNamespace: true || false, * }, @@ -478,6 +488,7 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio * { * name: "STRING_VALUE", * mountPath: "STRING_VALUE", + * subPath: "STRING_VALUE", * readOnly: true || false, * }, * ], @@ -516,6 +527,7 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio * { * name: "STRING_VALUE", * mountPath: "STRING_VALUE", + * subPath: "STRING_VALUE", * readOnly: true || false, * }, * ], @@ -543,12 +555,20 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio * secretName: "STRING_VALUE", // required * optional: true 
|| false, * }, + * persistentVolumeClaim: { + * claimName: "STRING_VALUE", // required + * readOnly: true || false, + * }, * }, * ], * metadata: { * labels: { * "": "STRING_VALUE", * }, + * annotations: { + * "": "STRING_VALUE", + * }, + * namespace: "STRING_VALUE", * }, * shareProcessNamespace: true || false, * }, diff --git a/clients/client-batch/src/commands/SubmitJobCommand.ts b/clients/client-batch/src/commands/SubmitJobCommand.ts index 9883be081167..2c0dbe2b8e60 100644 --- a/clients/client-batch/src/commands/SubmitJobCommand.ts +++ b/clients/client-batch/src/commands/SubmitJobCommand.ts @@ -177,6 +177,10 @@ export interface SubmitJobCommandOutput extends SubmitJobResponse, __MetadataBea * labels: { // EksLabelsMap * "": "STRING_VALUE", * }, + * annotations: { // EksAnnotationsMap + * "": "STRING_VALUE", + * }, + * namespace: "STRING_VALUE", * }, * }, * }, @@ -251,6 +255,10 @@ export interface SubmitJobCommandOutput extends SubmitJobResponse, __MetadataBea * labels: { * "": "STRING_VALUE", * }, + * annotations: { + * "": "STRING_VALUE", + * }, + * namespace: "STRING_VALUE", * }, * }, * }, diff --git a/clients/client-batch/src/models/models_0.ts b/clients/client-batch/src/models/models_0.ts index f4ea47efd961..ee725b471716 100644 --- a/clients/client-batch/src/models/models_0.ts +++ b/clients/client-batch/src/models/models_0.ts @@ -673,21 +673,17 @@ export interface ComputeResource { * * *

 *   With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and
- *   SPOT_PRICE_CAPACITY_OPTIMIZED
- *   (recommended) strategies using On-Demand or Spot Instances, and the
- *   BEST_FIT strategy using Spot Instances, Batch might need to exceed
- *   maxvCpus to meet your capacity requirements. In this event, Batch never exceeds
- *   maxvCpus by more than a single instance.

+ *   SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot
+ *   Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to
+ *   exceed maxvCpus to meet your capacity requirements. In this event, Batch never
+ *   exceeds maxvCpus by more than a single instance.

    * @public */ allocationStrategy?: CRAllocationStrategy | undefined; /** - *

- *   The minimum number of
- *   vCPUs that
- *   a
- *   compute
- *   environment should maintain (even if the compute environment is DISABLED).

    + *

+ *   The minimum number of vCPUs that a compute environment should maintain (even if the compute
+ *   environment is DISABLED).

    * *

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    *
    @@ -696,15 +692,11 @@ export interface ComputeResource { minvCpus?: number | undefined; /** - *

- *   The maximum number of
- *   vCPUs that a
- *   compute environment can
- *   support.

    + *

+ *   The maximum number of vCPUs that a compute environment can support.

    * *

 *   With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and
- *   SPOT_PRICE_CAPACITY_OPTIMIZED
- *   (recommended) strategies using On-Demand or Spot Instances, and the
- *   BEST_FIT strategy using Spot Instances, Batch might need to exceed
+ *   SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances,
+ *   and the BEST_FIT strategy using Spot Instances, Batch might need to exceed
 *   maxvCpus to meet your capacity requirements. In this event, Batch never exceeds
 *   maxvCpus by more than a single instance.

    *
    @@ -713,10 +705,8 @@ export interface ComputeResource { maxvCpus: number | undefined; /** - *

- *   The desired number of
- *   vCPUS in the
- *   compute environment. Batch modifies this value between the minimum and maximum values based on
- *   job queue demand.

    + *

+ *   The desired number of vCPUS in the compute environment. Batch modifies this value between
+ *   the minimum and maximum values based on job queue demand.
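As a hedged sketch (environment name, subnets, security group, and sizing values are placeholders), the minvCpus, maxvCpus, and desiredvCpus settings described above are supplied together on computeResources when creating a managed compute environment:

import { BatchClient, CreateComputeEnvironmentCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});
await client.send(new CreateComputeEnvironmentCommand({
  computeEnvironmentName: "example-ec2-ce",          // placeholder name
  type: "MANAGED",
  computeResources: {
    type: "EC2",
    allocationStrategy: "BEST_FIT_PROGRESSIVE",
    minvCpus: 0,        // floor Batch maintains even when the environment is DISABLED
    maxvCpus: 256,      // may be exceeded by at most one instance (see the note above)
    desiredvCpus: 0,    // Batch adjusts this between min and max based on job queue demand
    instanceTypes: ["optimal"],
    instanceRole: "ecsInstanceRole",                 // placeholder instance profile
    subnets: ["subnet-0123456789abcdef0"],           // placeholder subnet
    securityGroupIds: ["sg-0123456789abcdef0"],      // placeholder security group
  },
}));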

    * *

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    *
    @@ -859,8 +849,7 @@ export interface ComputeResource { * percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for * that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum * percentage. If you leave this field empty, the default value is 100% of the On-Demand - * price. For most use cases, - * we recommend leaving this field empty.

    + * price. For most use cases, we recommend leaving this field empty.

    * *

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    *
    @@ -2447,35 +2436,26 @@ export interface ResourceRequirement { */ export interface RuntimePlatform { /** - *

- *   The operating system for the compute environment.
- *   Valid values are:

+ *   The operating system for the compute environment. Valid values are:
 *   LINUX (default), WINDOWS_SERVER_2019_CORE,
 *   WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and
 *   WINDOWS_SERVER_2022_FULL.

    * *

    The following parameters can’t be set for Windows containers: linuxParameters, * privileged, user, ulimits, - * readonlyRootFilesystem, - * and efsVolumeConfiguration.

    + * readonlyRootFilesystem, and efsVolumeConfiguration.

    *
    * - *

- *   The Batch Scheduler checks
- *   the compute environments
- *   that are attached to the job queue before registering a task definition with
- *   Fargate. In this
- *   scenario, the job queue is where the job is submitted. If the job requires a
- *   Windows container and the first compute environment is LINUX, the compute
- *   environment is skipped and the next compute environment is checked until a Windows-based compute
- *   environment is found.

    + *

+ *   The Batch Scheduler checks the compute environments that are attached to the job queue before
+ *   registering a task definition with Fargate. In this scenario, the job queue is where the job is
+ *   submitted. If the job requires a Windows container and the first compute environment is LINUX,
+ *   the compute environment is skipped and the next compute environment is checked until a Windows-based
+ *   compute environment is found.

    *
    * - *

- *   Fargate Spot is not supported for
- *   ARM64 and
- *   Windows-based containers on Fargate. A job queue will be blocked if a
- *   Fargate
- *   ARM64 or
- *   Windows job is submitted to a job queue with only Fargate Spot compute environments.
- *   However, you can attach both FARGATE and

+ *   Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate.
+ *   A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job
+ *   queue with only Fargate Spot compute environments. However, you can attach both FARGATE and
 *   FARGATE_SPOT compute environments to the same job queue.
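A hedged sketch (image, role, and sizing are placeholders) of a Fargate job definition that pins the runtime platform the way the notes above describe, with X86_64 required for Windows containers:

import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});
await client.send(new RegisterJobDefinitionCommand({
  jobDefinitionName: "windows-fargate-example",      // placeholder name
  type: "container",
  platformCapabilities: ["FARGATE"],
  containerProperties: {
    image: "mcr.microsoft.com/windows/servercore:ltsc2022",   // placeholder image
    command: ["cmd", "/c", "echo", "hello"],
    runtimePlatform: {
      operatingSystemFamily: "WINDOWS_SERVER_2022_CORE",
      cpuArchitecture: "X86_64",                     // X86_64 is required for Windows containers
    },
    resourceRequirements: [
      { type: "VCPU", value: "1" },
      { type: "MEMORY", value: "2048" },
    ],
    executionRoleArn: "arn:aws:iam::123456789012:role/ecsTaskExecutionRole",  // placeholder role
  },
}));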

    *
    * @public @@ -2486,9 +2466,7 @@ export interface RuntimePlatform { *

    The vCPU architecture. The default value is X86_64. Valid values are * X86_64 and ARM64.

    * - *

    This parameter must be set to - * X86_64 - * for Windows containers.

    + *

    This parameter must be set to X86_64 for Windows containers.

    *
    * *

    Fargate Spot is not supported for ARM64 and Windows-based containers on @@ -2702,16 +2680,13 @@ export interface Volume { } /** - *

- *   Container properties are used
- *   for
- *   Amazon ECS based job definitions. These properties to describe the container that's
- *   launched as part of a job.

    + *

+ *   Container properties are used for Amazon ECS based job definitions. These properties describe the
+ *   container that's launched as part of a job.
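For instance, a minimal containerProperties block (image, command, and sizes are placeholders) for an Amazon ECS based job definition might look like:

import type { ContainerProperties } from "@aws-sdk/client-batch";

const containerProperties: ContainerProperties = {
  image: "public.ecr.aws/amazonlinux/amazonlinux:latest",   // placeholder image
  command: ["echo", "hello from Batch"],
  resourceRequirements: [
    { type: "VCPU", value: "1" },
    { type: "MEMORY", value: "2048" },
  ],
};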

    * @public */ export interface ContainerProperties { /** - *

    Required. - * The image used to start a container. This string is passed directly to the + *

    Required. The image used to start a container. This string is passed directly to the * Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are * specified with * @@ -3578,6 +3553,12 @@ export interface EksContainerVolumeMount { */ mountPath?: string | undefined; + /** + *

    A sub-path inside the referenced volume instead of its root.

    + * @public + */ + subPath?: string | undefined; + /** *
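As a small sketch (names and paths are placeholders), the new subPath field lets a container mount only one directory of a shared volume:

import type { EksContainerVolumeMount } from "@aws-sdk/client-batch";

const volumeMount: EksContainerVolumeMount = {
  name: "results",          // must match an EksVolume name in the same pod
  mountPath: "/data/out",
  subPath: "run-1",         // mount only this sub-directory instead of the volume root
  readOnly: false,
};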

    If this value is true, the container has read-only access to the volume. * Otherwise, the container can write to the volume. The default value is false.

    @@ -3701,7 +3682,8 @@ export interface ImagePullSecret { /** *

    Describes and uniquely identifies Kubernetes resources. For example, the compute environment that * a pod runs in or the jobID for a job running in the pod. For more information, see - * Understanding Kubernetes Objects in the Kubernetes documentation.

    + * + * Understanding Kubernetes Objects in the Kubernetes documentation.

    * @public */ export interface EksMetadata { @@ -3713,6 +3695,63 @@ export interface EksMetadata { * @public */ labels?: Record | undefined; + + /** + *

+ *   Key-value pairs used to attach arbitrary, non-identifying metadata to Kubernetes objects.
+ *   Valid annotation keys have two segments: an optional prefix and a name, separated by a
+ *   slash (/).

    + *
      + *
    • + *

+ *     The prefix is optional and must be 253 characters or less. If specified, the prefix
+ *     must be a DNS subdomain: a series of DNS labels separated by dots (.), and it must
+ *     end with a slash (/).

      + *
    • + *
    • + *

+ *     The name segment is required and must be 63 characters or less. It can include alphanumeric
+ *     characters ([a-z0-9A-Z]), dashes (-), underscores (_), and dots (.), but must begin and end
+ *     with an alphanumeric character.

      + *
    • + *
    + * + *

    Annotation values must be 255 characters or less.

    + *
    + *

    Annotations can be added or modified at any time. Each resource can have multiple annotations.
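For example (keys and values below are illustrative only), annotation keys may be written with or without the optional prefix described above:

import type { EksMetadata } from "@aws-sdk/client-batch";

const metadata: EksMetadata = {
  annotations: {
    // prefixed key: DNS-subdomain prefix + "/" + name segment (placeholder values)
    "example.com/cost-center": "analytics",
    // un-prefixed key: name segment only
    "run-id": "2024-12-17-001",
  },
};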

    + * @public + */ + annotations?: Record | undefined; + + /** + *

+ *   The namespace of the Amazon EKS cluster. In Kubernetes, namespaces provide a mechanism for isolating
+ *   groups of resources within a single cluster. Names of resources need to be unique within a namespace,
+ *   but not across namespaces. Batch places Batch Job pods in this namespace. If this field is provided,
+ *   the value can't be empty or null. It must meet the following requirements:

    + *
      + *
    • + *

      1-63 characters long

      + *
    • + *
    • + *

      Can't be set to default

      + *
    • + *
    • + *

Can't start with kube

      + *
    • + *
    • + *

Must match the following regular expression: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$

      + *
    • + *
    + *

+ *   For more information, see
+ *   Namespaces in the Kubernetes documentation. This namespace can be
+ *   different from the kubernetesNamespace set in the compute environment's
+ *   EksConfiguration, but must have identical role-based access control (RBAC) roles as
+ *   the compute environment's kubernetesNamespace. For multi-node parallel jobs,
+ *   the same value must be provided across all the node ranges.

    + * @public + */ + namespace?: string | undefined; } /** @@ -3768,6 +3807,31 @@ export interface EksHostPath { path?: string | undefined; } +/** + *
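A small client-side sketch of the naming rules listed above (the service performs the authoritative validation; this is only illustrative):

// Mirrors the documented constraints on the Batch EKS namespace value.
const EKS_NAMESPACE_PATTERN = /^[a-z0-9]([-a-z0-9]*[a-z0-9])?$/;

function isAcceptableBatchNamespace(namespace: string): boolean {
  return (
    namespace.length >= 1 &&
    namespace.length <= 63 &&
    namespace !== "default" &&
    !namespace.startsWith("kube") &&
    EKS_NAMESPACE_PATTERN.test(namespace)
  );
}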

+ *   A persistentVolumeClaim volume is used to mount a PersistentVolume
+ *   into a Pod. PersistentVolumeClaims are a way for users to "claim" durable storage without knowing
+ *   the details of the particular cloud environment. See the information about PersistentVolumes
+ *   in the Kubernetes documentation.

    + * @public + */ +export interface EksPersistentVolumeClaim { + /** + *

+ *   The name of the persistentVolumeClaim bound to a persistentVolume.
+ *   For more information, see
+ *   Persistent Volume Claims in the Kubernetes documentation.

    + * @public + */ + claimName: string | undefined; + + /** + *

+ *   An optional Boolean value indicating whether the mount is read only. The default is false. For more
+ *   information, see
+ *   Read Only Mounts in the Kubernetes documentation.

    + * @public + */ + readOnly?: boolean | undefined; +} + /** *

    Specifies the configuration of a Kubernetes secret volume. For more information, see * secret in the @@ -3824,6 +3888,14 @@ export interface EksVolume { * @public */ secret?: EksSecret | undefined; + + /** + *

+ *   Specifies the configuration of a Kubernetes persistentVolumeClaim bound to a
+ *   persistentVolume. For more information, see
+ *   Persistent Volume Claims in the Kubernetes documentation.

    + * @public + */ + persistentVolumeClaim?: EksPersistentVolumeClaim | undefined; } /** @@ -3903,10 +3975,7 @@ export interface EksPodProperties { volumes?: EksVolume[] | undefined; /** - *
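Putting the new fields together, a hedged end-to-end sketch (job definition name, image, claim name, and annotation key are placeholders) of registering an EKS job definition that uses annotations, a namespace override, a persistentVolumeClaim volume, and a subPath mount:

import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});
await client.send(new RegisterJobDefinitionCommand({
  jobDefinitionName: "eks-pvc-example",              // placeholder name
  type: "container",
  eksProperties: {
    podProperties: {
      metadata: {
        labels: { app: "batch-example" },
        annotations: { "example.com/owner": "data-team" },   // hypothetical annotation key
        namespace: "batch-jobs",                             // must satisfy the namespace rules above
      },
      containers: [
        {
          image: "public.ecr.aws/amazonlinux/amazonlinux:latest",   // placeholder image
          command: ["sh", "-c", "echo done > /data/out/result.txt"],
          volumeMounts: [
            { name: "results", mountPath: "/data/out", subPath: "run-1", readOnly: false },
          ],
        },
      ],
      volumes: [
        { name: "results", persistentVolumeClaim: { claimName: "batch-results-pvc", readOnly: false } },
      ],
    },
  },
}));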

    Metadata about the - * Kubernetes - * pod. For - * more information, see Understanding Kubernetes Objects in the Kubernetes + *

    Metadata about the Kubernetes pod. For more information, see Understanding Kubernetes Objects in the Kubernetes * documentation.

    * @public */ @@ -4050,10 +4119,8 @@ export interface EvaluateOnExit { /** *

    Contains a glob pattern to match against the StatusReason returned for a job. * The pattern can contain up to 512 characters. It can contain letters, numbers, periods (.), - * colons (:), and white spaces (including spaces or tabs). - * It can - * optionally end with an asterisk (*) so that only the start of the string needs to be an exact - * match.

    + * colons (:), and white spaces (including spaces or tabs). It can optionally end with an asterisk (*) + * so that only the start of the string needs to be an exact match.

    * @public */ onStatusReason?: string | undefined; @@ -4503,9 +4570,7 @@ export interface ContainerDetail { jobRoleArn?: string | undefined; /** - *

    The Amazon Resource Name (ARN) of the - * execution - * role that Batch can assume. For more information, + *

    The Amazon Resource Name (ARN) of the execution role that Batch can assume. For more information, * see Batch execution IAM * role in the Batch User Guide.

    * @public @@ -6613,8 +6678,7 @@ export interface EksPodPropertiesOverride { initContainers?: EksContainerOverride[] | undefined; /** - *

    Metadata about the - * overrides for the container that's used on the Amazon EKS pod.

    + *

    Metadata about the overrides for the container that's used on the Amazon EKS pod.
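A hedged sketch (queue, definition, and annotation key are placeholders) of overriding the pod metadata at submission time through eksPropertiesOverride:

import { BatchClient, SubmitJobCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});
await client.send(new SubmitJobCommand({
  jobName: "eks-metadata-override",                  // placeholder name
  jobQueue: "my-eks-job-queue",                      // placeholder queue
  jobDefinition: "eks-pvc-example",                  // placeholder definition
  eksPropertiesOverride: {
    podProperties: {
      metadata: {
        annotations: { "example.com/run-id": "2024-12-17-001" },   // hypothetical key
        namespace: "batch-jobs",
      },
    },
  },
}));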

    * @public */ metadata?: EksMetadata | undefined; @@ -6747,10 +6811,8 @@ export interface SubmitJobRequest { /** *

    The scheduling priority for the job. This only affects jobs in job queues with a fair * share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower - * scheduling priority. - * This - * overrides any scheduling priority in the job definition and works only within a single share - * identifier.

    + * scheduling priority. This overrides any scheduling priority in the job definition and works only + * within a single share identifier.

    *

    The minimum supported value is 0 and the maximum supported value is 9999.

    * @public */ @@ -6994,9 +7056,8 @@ export type CRUpdateAllocationStrategy = (typeof CRUpdateAllocationStrategy)[key */ export interface ComputeResourceUpdate { /** - *

    The minimum number of - * vCPUs that - * an environment should maintain (even if the compute environment is DISABLED).

    + *

    The minimum number of vCPUs that an environment should maintain (even if the compute environment + * is DISABLED).

    * *

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    *
    @@ -7008,21 +7069,18 @@ export interface ComputeResourceUpdate { *

    The maximum number of Amazon EC2 vCPUs that an environment can reach.

    * *

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and - * SPOT_PRICE_CAPACITY_OPTIMIZED - * (recommended) strategies using On-Demand or Spot Instances, and the - * BEST_FIT strategy using Spot Instances, Batch might need to exceed - * maxvCpus to meet your capacity requirements. In this event, Batch never exceeds - * maxvCpus by more than a single instance.

    + * SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot + * Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to + * exceed maxvCpus to meet your capacity requirements. In this event, Batch never + * exceeds maxvCpus by more than a single instance.

    *
    * @public */ maxvCpus?: number | undefined; /** - *

    The desired number of - * vCPUS in the - * compute environment. Batch modifies this value between the minimum and maximum values based on - * job queue demand.

    + *

    The desired number of vCPUS in the compute environment. Batch modifies this value between + * the minimum and maximum values based on job queue demand.

    * *

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    *
    @@ -7112,9 +7170,8 @@ export interface ComputeResourceUpdate { * * *

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and - * SPOT_PRICE_CAPACITY_OPTIMIZED - * (recommended) strategies using On-Demand or Spot Instances, and the - * BEST_FIT strategy using Spot Instances, Batch might need to exceed + * SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, + * and the BEST_FIT strategy using Spot Instances, Batch might need to exceed * maxvCpus to meet your capacity requirements. In this event, Batch never exceeds * maxvCpus by more than a single instance.

    * @public @@ -7163,8 +7220,7 @@ export interface ComputeResourceUpdate { /** *

    The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. - * Required for Amazon EC2 - * instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance + * Required for Amazon EC2 instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance * profile. For example, * ecsInstanceRole * or @@ -7220,8 +7276,7 @@ export interface ComputeResourceUpdate { * price for that instance type before instances are launched. For example, if your maximum * percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that * Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum - * percentage. For most use - * cases, we recommend leaving this field empty.

    + * percentage. For most use cases, we recommend leaving this field empty.

    *

    When updating a compute environment, changing the bid percentage requires an infrastructure * update of the compute environment. For more information, see Updating compute environments in the * Batch User Guide.

    diff --git a/clients/client-batch/src/protocols/Aws_restJson1.ts b/clients/client-batch/src/protocols/Aws_restJson1.ts index 3e38b742af74..51c0db767f96 100644 --- a/clients/client-batch/src/protocols/Aws_restJson1.ts +++ b/clients/client-batch/src/protocols/Aws_restJson1.ts @@ -120,6 +120,7 @@ import { EksEmptyDir, EksHostPath, EksMetadata, + EksPersistentVolumeClaim, EksPodProperties, EksPodPropertiesOverride, EksProperties, @@ -1380,6 +1381,8 @@ const de_ServerExceptionRes = async (parsedOutput: any, context: __SerdeContext) // se_EFSVolumeConfiguration omitted. +// se_EksAnnotationsMap omitted. + // se_EksConfiguration omitted. // se_EksContainer omitted. @@ -1412,6 +1415,8 @@ const de_ServerExceptionRes = async (parsedOutput: any, context: __SerdeContext) // se_EksMetadata omitted. +// se_EksPersistentVolumeClaim omitted. + // se_EksPodProperties omitted. // se_EksPodPropertiesOverride omitted. @@ -1650,6 +1655,8 @@ const se_ShareAttributesList = (input: ShareAttributes[], context: __SerdeContex // de_EFSVolumeConfiguration omitted. +// de_EksAnnotationsMap omitted. + // de_EksAttemptContainerDetail omitted. // de_EksAttemptContainerDetails omitted. @@ -1690,6 +1697,8 @@ const se_ShareAttributesList = (input: ShareAttributes[], context: __SerdeContex // de_EksMetadata omitted. +// de_EksPersistentVolumeClaim omitted. + // de_EksPodProperties omitted. // de_EksPodPropertiesDetail omitted. diff --git a/codegen/sdk-codegen/aws-models/batch.json b/codegen/sdk-codegen/aws-models/batch.json index 48d478ba4e8d..52bf1fb638ed 100644 --- a/codegen/sdk-codegen/aws-models/batch.json +++ b/codegen/sdk-codegen/aws-models/batch.json @@ -1810,27 +1810,27 @@ "allocationStrategy": { "target": "com.amazonaws.batch#CRAllocationStrategy", "traits": { - "smithy.api#documentation": "

    The allocation strategy to use for the compute resource if not enough instances of the best\n fitting instance type can be allocated. This might be because of availability of the instance\n type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    \n
    \n
    BEST_FIT (default)
    \n
    \n

    Batch selects an instance type that best fits the needs of the jobs with a preference\n for the lowest-cost instance type. If additional instances of the selected instance type\n aren't available, Batch waits for the additional instances to be available. If there aren't\n enough instances available or the user is reaching Amazon EC2 service limits,\n additional jobs aren't run until the currently running jobs are completed. This allocation\n strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with\n BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use\n a BEST_FIT allocation strategy don't support infrastructure updates and can't\n update some parameters. For more information, see Updating compute environments in\n the Batch User Guide.

    \n
    \n
    BEST_FIT_PROGRESSIVE
    \n
    \n

    Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

    \n
    \n
    SPOT_CAPACITY_OPTIMIZED
    \n
    \n

    Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

    \n
    \n
    SPOT_PRICE_CAPACITY_OPTIMIZED
    \n
    \n

    The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

    \n
    \n
    \n

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

    " + "smithy.api#documentation": "

    The allocation strategy to use for the compute resource if not enough instances of the best\n fitting instance type can be allocated. This might be because of availability of the instance\n type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    \n
    \n
    BEST_FIT (default)
    \n
    \n

    Batch selects an instance type that best fits the needs of the jobs with a preference\n for the lowest-cost instance type. If additional instances of the selected instance type\n aren't available, Batch waits for the additional instances to be available. If there aren't\n enough instances available or the user is reaching Amazon EC2 service limits,\n additional jobs aren't run until the currently running jobs are completed. This allocation\n strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with\n BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use\n a BEST_FIT allocation strategy don't support infrastructure updates and can't\n update some parameters. For more information, see Updating compute environments in\n the Batch User Guide.

    \n
    \n
    BEST_FIT_PROGRESSIVE
    \n
    \n

    Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

    \n
    \n
    SPOT_CAPACITY_OPTIMIZED
    \n
    \n

    Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

    \n
    \n
    SPOT_PRICE_CAPACITY_OPTIMIZED
    \n
    \n

    The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

    \n
    \n
    \n

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot \n Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to \n exceed maxvCpus to meet your capacity requirements. In this event, Batch never \n exceeds maxvCpus by more than a single instance.

    " } }, "minvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

    The minimum number of\n vCPUs that\n a\n compute\n environment should maintain (even if the compute environment is DISABLED).

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " + "smithy.api#documentation": "

    The minimum number of vCPUs that a compute environment should maintain (even if the compute \n environment is DISABLED).

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " } }, "maxvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

    The maximum number of\n vCPUs that a\n compute environment can\n support.

    \n \n

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

    \n
    ", + "smithy.api#documentation": "

    The maximum number of vCPUs that a compute environment can support.

    \n \n

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, \n and the BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

    \n
    ", "smithy.api#required": {} } }, "desiredvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

    The desired number of\n vCPUS in the\n compute environment. Batch modifies this value between the minimum and maximum values based on\n job queue demand.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " + "smithy.api#documentation": "

    The desired number of vCPUS in the compute environment. Batch modifies this value between \n the minimum and maximum values based on job queue demand.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " } }, "instanceTypes": { @@ -1889,7 +1889,7 @@ "bidPercentage": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

    The maximum percentage that a Spot Instance price can be when compared with the On-Demand\n price for that instance type before instances are launched. For example, if your maximum\n percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for\n that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum\n percentage. If you leave this field empty, the default value is 100% of the On-Demand\n price. For most use cases,\n we recommend leaving this field empty.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " + "smithy.api#documentation": "

    The maximum percentage that a Spot Instance price can be when compared with the On-Demand\n price for that instance type before instances are launched. For example, if your maximum\n percentage is 20%, then the Spot price must be less than 20% of the current On-Demand price for\n that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum\n percentage. If you leave this field empty, the default value is 100% of the On-Demand\n price. For most use cases, we recommend leaving this field empty.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " } }, "spotIamFleetRole": { @@ -1921,19 +1921,19 @@ "minvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

    The minimum number of\n vCPUs that\n an environment should maintain (even if the compute environment is DISABLED).

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " + "smithy.api#documentation": "

    The minimum number of vCPUs that an environment should maintain (even if the compute environment \n is DISABLED).

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " } }, "maxvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

    The maximum number of Amazon EC2 vCPUs that an environment can reach.

    \n \n

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

    \n
    " + "smithy.api#documentation": "

    The maximum number of Amazon EC2 vCPUs that an environment can reach.

    \n \n

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot \n Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to \n exceed maxvCpus to meet your capacity requirements. In this event, Batch never \n exceeds maxvCpus by more than a single instance.

    \n
    " } }, "desiredvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

    The desired number of\n vCPUS in the\n compute environment. Batch modifies this value between the minimum and maximum values based on\n job queue demand.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    \n \n

    Batch doesn't support changing the desired number of vCPUs of an existing compute\n environment. Don't specify this parameter for compute environments using Amazon EKS clusters.

    \n
    \n \n

    When you update the desiredvCpus setting, the value must be between the\n minvCpus and maxvCpus values.

    \n

    Additionally, the updated desiredvCpus value must be greater than or equal to\n the current desiredvCpus value. For more information, see Troubleshooting\n Batch in the Batch User Guide.

    \n
    " + "smithy.api#documentation": "

    The desired number of vCPUS in the compute environment. Batch modifies this value between \n the minimum and maximum values based on job queue demand.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    \n \n

    Batch doesn't support changing the desired number of vCPUs of an existing compute\n environment. Don't specify this parameter for compute environments using Amazon EKS clusters.

    \n
    \n \n

    When you update the desiredvCpus setting, the value must be between the\n minvCpus and maxvCpus values.

    \n

    Additionally, the updated desiredvCpus value must be greater than or equal to\n the current desiredvCpus value. For more information, see Troubleshooting\n Batch in the Batch User Guide.

    \n
    " } }, "subnets": { @@ -1951,7 +1951,7 @@ "allocationStrategy": { "target": "com.amazonaws.batch#CRUpdateAllocationStrategy", "traits": { - "smithy.api#documentation": "

    The allocation strategy to use for the compute resource if there's not enough instances of\n the best fitting instance type that can be allocated. This might be because of availability of\n the instance type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

    \n

    When updating a compute environment, changing the allocation strategy requires an\n infrastructure update of the compute environment. For more information, see Updating compute\n environments in the Batch User Guide. BEST_FIT isn't\n supported when updating a compute environment.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    \n
    \n
    BEST_FIT_PROGRESSIVE
    \n
    \n

    Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

    \n
    \n
    SPOT_CAPACITY_OPTIMIZED
    \n
    \n

    Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

    \n
    \n
    SPOT_PRICE_CAPACITY_OPTIMIZED
    \n
    \n

    The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

    \n
    \n
    \n

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

    " + "smithy.api#documentation": "

    The allocation strategy to use for the compute resource if there's not enough instances of\n the best fitting instance type that can be allocated. This might be because of availability of\n the instance type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

    \n

    When updating a compute environment, changing the allocation strategy requires an\n infrastructure update of the compute environment. For more information, see Updating compute\n environments in the Batch User Guide. BEST_FIT isn't\n supported when updating a compute environment.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    \n
    \n
    BEST_FIT_PROGRESSIVE
    \n
    \n

    Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

    \n
    \n
    SPOT_CAPACITY_OPTIMIZED
    \n
    \n

    Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

    \n
    \n
    SPOT_PRICE_CAPACITY_OPTIMIZED
    \n
    \n

    The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

    \n
    \n
    \n

    With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and\n SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, \n and the BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

    " } }, "instanceTypes": { @@ -1969,7 +1969,7 @@ "instanceRole": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

    The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment.\n Required for Amazon EC2\n instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance\n profile. For example, \n ecsInstanceRole\n or\n arn:aws:iam:::instance-profile/ecsInstanceRole\n .\n For more information, see Amazon ECS instance role in the Batch User Guide.

    \n

    When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " + "smithy.api#documentation": "

    The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment.\n Required for Amazon EC2 instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance\n profile. For example, \n ecsInstanceRole\n or\n arn:aws:iam:::instance-profile/ecsInstanceRole\n .\n For more information, see Amazon ECS instance role in the Batch User Guide.

    \n

    When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " } }, "tags": { @@ -1987,7 +1987,7 @@ "bidPercentage": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

    The maximum percentage that a Spot Instance price can be when compared with the On-Demand\n price for that instance type before instances are launched. For example, if your maximum\n percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that\n Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum\n percentage. For most use\n cases, we recommend leaving this field empty.

    \n

    When updating a compute environment, changing the bid percentage requires an infrastructure\n update of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " + "smithy.api#documentation": "

    The maximum percentage that a Spot Instance price can be when compared with the On-Demand\n price for that instance type before instances are launched. For example, if your maximum\n percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that\n Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum\n percentage. For most use cases, we recommend leaving this field empty.

    \n

    When updating a compute environment, changing the bid percentage requires an infrastructure\n update of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

    \n \n

    This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

    \n
    " } }, "launchTemplate": { @@ -2061,7 +2061,7 @@ "executionRoleArn": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

    The Amazon Resource Name (ARN) of the\n execution\n role that Batch can assume. For more information,\n see Batch execution IAM\n role in the Batch User Guide.

    " + "smithy.api#documentation": "

    The Amazon Resource Name (ARN) of the execution role that Batch can assume. For more information,\n see Batch execution IAM\n role in the Batch User Guide.

    " } }, "volumes": { @@ -2263,7 +2263,7 @@ "image": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

    Required.\n The image used to start a container. This string is passed directly to the\n Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are\n specified with\n \n repository-url/image:tag\n .\n It can be 255 characters long. It can contain uppercase and lowercase letters, numbers,\n hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the\n Create a container section of the Docker Remote API and the IMAGE\n parameter of docker run.

    \n \n

    Docker image architecture must match the processor architecture of the compute resources\n that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based\n compute resources.

    \n
    \n
      \n
    • \n

      Images in Amazon ECR Public repositories use the full registry/repository[:tag] or\n registry/repository[@digest] naming conventions. For example,\n public.ecr.aws/registry_alias/my-web-app:latest\n .

      \n
    • \n
    • \n

      Images in Amazon ECR repositories use the full registry and repository URI (for example,\n 123456789012.dkr.ecr..amazonaws.com/).

      \n
    • \n
    • \n

      Images in official repositories on Docker Hub use a single name (for example,\n ubuntu or mongo).

      \n
    • \n
    • \n

      Images in other repositories on Docker Hub are qualified with an organization name (for\n example, amazon/amazon-ecs-agent).

      \n
    • \n
    • \n

      Images in other online repositories are qualified further by a domain name (for example,\n quay.io/assemblyline/ubuntu).

      \n
    • \n
    " + "smithy.api#documentation": "

    Required. The image used to start a container. This string is passed directly to the\n Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are\n specified with\n \n repository-url/image:tag\n .\n It can be 255 characters long. It can contain uppercase and lowercase letters, numbers,\n hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the\n Create a container section of the Docker Remote API and the IMAGE\n parameter of docker run.

    \n \n

    Docker image architecture must match the processor architecture of the compute resources\n that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based\n compute resources.

    \n
    \n
      \n
    • \n

      Images in Amazon ECR Public repositories use the full registry/repository[:tag] or\n registry/repository[@digest] naming conventions. For example,\n public.ecr.aws/registry_alias/my-web-app:latest\n .

      \n
    • \n
    • \n

      Images in Amazon ECR repositories use the full registry and repository URI (for example,\n 123456789012.dkr.ecr..amazonaws.com/).

      \n
    • \n
    • \n

      Images in official repositories on Docker Hub use a single name (for example,\n ubuntu or mongo).

      \n
    • \n
    • \n

      Images in other repositories on Docker Hub are qualified with an organization name (for\n example, amazon/amazon-ecs-agent).

      \n
    • \n
    • \n

      Images in other online repositories are qualified further by a domain name (for example,\n quay.io/assemblyline/ubuntu).

      \n
    • \n
    " } }, "vcpus": { @@ -2406,7 +2406,7 @@ } }, "traits": { - "smithy.api#documentation": "

    Container properties are used\n for\n Amazon ECS based job definitions. These properties to describe the container that's\n launched as part of a job.

    " + "smithy.api#documentation": "

Container properties are used for Amazon ECS based job definitions. These properties describe the \n container that's launched as part of a job.

    " } }, "com.amazonaws.batch#ContainerSummary": { @@ -2446,7 +2446,7 @@ } ], "traits": { - "smithy.api#documentation": "

      Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute
      environments. MANAGED compute environments can use Amazon EC2 or Fargate resources.
      UNMANAGED compute environments can only use EC2 resources.

      In a managed compute environment, Batch manages the capacity and instance types of the
      compute resources within the environment. This is based on the compute resource
      specification that you define or the launch template that you specify when you create the
      compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot
      Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute
      environment. You can optionally set a maximum price so that Spot Instances only launch when
      the Spot Instance price is less than a specified percentage of the On-Demand price.

      Multi-node parallel jobs aren't supported on Spot Instances.

      In an unmanaged compute environment, you can manage your own EC2 compute resources and have
      flexibility with how you configure your compute resources. For example, you can use custom
      AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance
      AMI specification. For more information, see container instance AMIs in the
      Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute
      environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS
      cluster that's associated with it. Then, launch your container instances into that Amazon
      ECS cluster. For more information, see Launching an Amazon ECS container instance in the
      Amazon Elastic Container Service Developer Guide.

      To create a compute environment that uses EKS resources, the caller must have permissions
      to call eks:DescribeCluster.

      Batch doesn't automatically upgrade the AMIs in a compute environment after it's created.
      For example, it also doesn't update the AMIs in your compute environment when a newer
      version of the Amazon ECS optimized AMI is available. You're responsible for the management
      of the guest operating system. This includes any updates and security patches. You're also
      responsible for any additional application software or utilities that you install on the
      compute resources. There are two ways to use a new AMI for your Batch jobs. The original
      method is to complete these steps:

      1. Create a new compute environment with the new AMI.
      2. Add the compute environment to an existing job queue.
      3. Remove the earlier compute environment from your job queue.
      4. Delete the earlier compute environment.

      In April 2022, Batch added enhanced support for updating compute environments. For more
      information, see Updating compute environments. To use the enhanced updating of compute
      environments to update AMIs, follow these rules:

      • Either don't set the service role (serviceRole) parameter or set it to the
        AWSBatchServiceRole service-linked role.
      • Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE,
        SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED.
      • Set the update to latest image version (updateToLatestImageVersion) parameter to true.
        The updateToLatestImageVersion parameter is used when you update a compute environment.
        This parameter is ignored when you create a compute environment.
      • Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration), or in the
        launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS
        optimized AMI that's supported by Batch at the time the infrastructure update is
        initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride
        parameters, or the launch template identified by the LaunchTemplate properties. Changing
        any of these properties starts an infrastructure update. If the AMI ID is specified in
        the launch template, it can't be replaced by specifying an AMI ID in either the imageId
        or imageIdOverride parameters. It can only be replaced by specifying a different launch
        template, or if the launch template version is set to $Default or $Latest, by setting
        either a new default version for the launch template (if $Default) or by adding a new
        version to the launch template (if $Latest).

      If these rules are followed, any update that starts an infrastructure update causes the AMI
      ID to be re-selected. If the version setting in the launch template (launchTemplate) is set
      to $Latest or $Default, the latest or default version of the launch template is evaluated
      up at the time of the infrastructure update, even if the launchTemplate wasn't updated.
    ", + "smithy.api#documentation": "

      Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute
      environments. MANAGED compute environments can use Amazon EC2 or Fargate resources.
      UNMANAGED compute environments can only use EC2 resources.

      In a managed compute environment, Batch manages the capacity and instance types of the
      compute resources within the environment. This is based on the compute resource
      specification that you define or the launch template that you specify when you create the
      compute environment. You can choose to use EC2 On-Demand Instances and EC2 Spot Instances,
      or you can use Fargate and Fargate Spot capacity in your managed compute environment. You
      can optionally set a maximum price so that Spot Instances only launch when the Spot
      Instance price is less than a specified percentage of the On-Demand price.

      Multi-node parallel jobs aren't supported on Spot Instances.

      In an unmanaged compute environment, you can manage your own EC2 compute resources and have
      flexibility with how you configure your compute resources. For example, you can use custom
      AMIs. However, you must verify that each of your AMIs meets the Amazon ECS container
      instance AMI specification. For more information, see container instance AMIs in the
      Amazon Elastic Container Service Developer Guide. After you create your unmanaged compute
      environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS
      cluster that's associated with it. Then, launch your container instances into that Amazon
      ECS cluster. For more information, see Launching an Amazon ECS container instance in the
      Amazon Elastic Container Service Developer Guide.

      To create a compute environment that uses EKS resources, the caller must have permissions
      to call eks:DescribeCluster.

      Batch doesn't automatically upgrade the AMIs in a compute environment after it's created.
      For example, it also doesn't update the AMIs in your compute environment when a newer
      version of the Amazon ECS optimized AMI is available. You're responsible for the management
      of the guest operating system. This includes any updates and security patches. You're also
      responsible for any additional application software or utilities that you install on the
      compute resources. There are two ways to use a new AMI for your Batch jobs. The original
      method is to complete these steps:

      1. Create a new compute environment with the new AMI.
      2. Add the compute environment to an existing job queue.
      3. Remove the earlier compute environment from your job queue.
      4. Delete the earlier compute environment.

      In April 2022, Batch added enhanced support for updating compute environments. For more
      information, see Updating compute environments. To use the enhanced updating of compute
      environments to update AMIs, follow these rules:

      • Either don't set the service role (serviceRole) parameter or set it to the
        AWSBatchServiceRole service-linked role.
      • Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE,
        SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED.
      • Set the update to latest image version (updateToLatestImageVersion) parameter to true.
        The updateToLatestImageVersion parameter is used when you update a compute environment.
        This parameter is ignored when you create a compute environment.
      • Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration), or in the
        launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS
        optimized AMI that's supported by Batch at the time the infrastructure update is
        initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride
        parameters, or the launch template identified by the LaunchTemplate properties. Changing
        any of these properties starts an infrastructure update. If the AMI ID is specified in
        the launch template, it can't be replaced by specifying an AMI ID in either the imageId
        or imageIdOverride parameters. It can only be replaced by specifying a different launch
        template, or if the launch template version is set to $Default or $Latest, by setting
        either a new default version for the launch template (if $Default) or by adding a new
        version to the launch template (if $Latest).

      If these rules are followed, any update that starts an infrastructure update causes the AMI
      ID to be re-selected. If the version setting in the launch template (launchTemplate) is set
      to $Latest or $Default, the latest or default version of the launch template is evaluated
      at the time of the infrastructure update, even if the launchTemplate wasn't updated.
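      To make these rules concrete, here is a rough TypeScript sketch (not part of this patch) of
      an infrastructure update that follows them. Because updateToLatestImageVersion is ignored
      at creation time, the sketch uses UpdateComputeEnvironmentCommand; the compute environment
      name is a placeholder and an existing managed EC2 environment is assumed:

```ts
import { BatchClient, UpdateComputeEnvironmentCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({ region: "us-east-1" });

// serviceRole is intentionally omitted so the AWSBatchServiceRole service-linked
// role is used, and no imageId/imageIdOverride is specified, so Batch re-selects
// the latest Amazon ECS optimized AMI during the infrastructure update.
await client.send(
  new UpdateComputeEnvironmentCommand({
    computeEnvironment: "my-managed-ec2-env", // placeholder environment name
    computeResources: {
      allocationStrategy: "BEST_FIT_PROGRESSIVE",
      updateToLatestImageVersion: true,
    },
  })
);
```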
    ", "smithy.api#examples": [ { "title": "To create a managed EC2 compute environment", @@ -3949,6 +3949,15 @@ "smithy.api#documentation": "

    The properties for a task definition that describes the container and volume definitions of\n an Amazon ECS task. You can specify which Docker images to use, the required resources, and other\n configurations related to launching the task definition through an Amazon ECS service or task.

    " } }, + "com.amazonaws.batch#EksAnnotationsMap": { + "type": "map", + "key": { + "target": "com.amazonaws.batch#String" + }, + "value": { + "target": "com.amazonaws.batch#String" + } + }, "com.amazonaws.batch#EksAttemptContainerDetail": { "type": "structure", "members": { @@ -4380,6 +4389,12 @@ "smithy.api#documentation": "

    The path on the container where the volume is mounted.

    " } }, + "subPath": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

    A sub-path inside the referenced volume instead of its root.
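      For illustration only, a minimal sketch of a volume mount that uses the new subPath field;
      the volume name and paths are hypothetical:

```ts
import type { EksContainerVolumeMount } from "@aws-sdk/client-batch";

// Mount only the "models/latest" directory of the shared volume instead of its root.
const volumeMount: EksContainerVolumeMount = {
  name: "shared-data", // must match an EksVolume name declared in the same pod
  mountPath: "/opt/models",
  subPath: "models/latest",
  readOnly: true,
};
```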

    " + } + }, "readOnly": { "target": "com.amazonaws.batch#Boolean", "traits": { @@ -4463,10 +4478,44 @@ "traits": { "smithy.api#documentation": "

      Key-value pairs used to identify, sort, and organize Kubernetes resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object.

    " } + }, + "annotations": { + "target": "com.amazonaws.batch#EksAnnotationsMap", + "traits": { + "smithy.api#documentation": "

      Key-value pairs used to attach arbitrary, non-identifying metadata to Kubernetes objects.
      Valid annotation keys have two segments: an optional prefix and a name, separated by a
      slash (/).

      • The prefix is optional and must be 253 characters or less. If specified, the prefix must
        be a DNS subdomain: a series of DNS labels separated by dots (.), and it must end with a
        slash (/).
      • The name segment is required and must be 63 characters or less. It can include
        alphanumeric characters ([a-z0-9A-Z]), dashes (-), underscores (_), and dots (.), but
        must begin and end with an alphanumeric character.

      Annotation values must be 255 characters or less.

      Annotations can be added or modified at any time. Each resource can have multiple
      annotations.
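      As a non-authoritative sketch, an EksMetadata value that satisfies these rules might look
      like the following; the label, prefix, and annotation values are invented for illustration:

```ts
import type { EksMetadata } from "@aws-sdk/client-batch";

// Illustrative metadata block; "example.com/" is a hypothetical prefix that
// satisfies the prefix/name rules described above.
const metadata: EksMetadata = {
  labels: { environment: "dev" },
  annotations: {
    "example.com/build-id": "build-2024-12-17", // prefixed key
    "cost-center": "research",                  // name-only key
  },
};
```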

    " + } + }, + "namespace": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

      The namespace of the Amazon EKS cluster. In Kubernetes, namespaces provide a mechanism for
      isolating groups of resources within a single cluster. Names of resources need to be unique
      within a namespace, but not across namespaces. Batch places Batch Job pods in this
      namespace. If this field is provided, the value can't be empty or null. It must meet the
      following requirements:

      • 1-63 characters long
      • Can't be set to default
      • Can't start with kube
      • Must match the following regular expression: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$

      For more information, see Namespaces in the Kubernetes documentation. This namespace can be
      different from the kubernetesNamespace set in the compute environment's EksConfiguration,
      but must have identical role-based access control (RBAC) roles as the compute environment's
      kubernetesNamespace. For multi-node parallel jobs, the same value must be provided across
      all the node ranges.
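      Because the submit-time override's metadata also targets EksMetadata, overriding the pod's
      namespace could look roughly like the sketch below; the queue, job definition, and
      namespace names are placeholders, and the target namespace is assumed to already carry RBAC
      roles matching the compute environment's kubernetesNamespace:

```ts
import { BatchClient, SubmitJobCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({ region: "us-east-1" });

// Placeholder names throughout; "team-a-jobs" must satisfy the namespace rules
// above (1-63 characters, not "default", not starting with "kube", ...).
await client.send(
  new SubmitJobCommand({
    jobName: "eks-namespace-override-example",
    jobQueue: "my-eks-job-queue",
    jobDefinition: "my-eks-job-definition",
    eksPropertiesOverride: {
      podProperties: {
        metadata: {
          namespace: "team-a-jobs",
        },
      },
    },
  })
);
```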

    " + } } }, "traits": { - "smithy.api#documentation": "

    Describes and uniquely identifies Kubernetes resources. For example, the compute environment that\n a pod runs in or the jobID for a job running in the pod. For more information, see\n Understanding Kubernetes Objects in the Kubernetes documentation.

    " + "smithy.api#documentation": "

    Describes and uniquely identifies Kubernetes resources. For example, the compute environment that\n a pod runs in or the jobID for a job running in the pod. For more information, see\n \n Understanding Kubernetes Objects in the Kubernetes documentation.

    " + } + }, + "com.amazonaws.batch#EksPersistentVolumeClaim": { + "type": "structure", + "members": { + "claimName": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

      The name of the persistentVolumeClaim bound to a persistentVolume. For more information, see Persistent Volume Claims in the Kubernetes documentation.

    ", + "smithy.api#required": {} + } + }, + "readOnly": { + "target": "com.amazonaws.batch#Boolean", + "traits": { + "smithy.api#documentation": "

    An optional boolean value indicating if the mount is read only. Default is false. For more\n information, see \n Read Only Mounts in the Kubernetes documentation.

    " + } + } + }, + "traits": { + "smithy.api#documentation": "

    A persistentVolumeClaim volume is used to mount a PersistentVolume\n into a Pod. PersistentVolumeClaims are a way for users to \"claim\" durable storage without knowing \n the details of the particular cloud environment. See the information about PersistentVolumes\n in the Kubernetes documentation.
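      Tying the new fields together, a hedged sketch of an EKS job definition that mounts an
      existing claim; the image, claim name, and paths are placeholders, and the claim is assumed
      to already exist in the pod's namespace:

```ts
import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({ region: "us-east-1" });

await client.send(
  new RegisterJobDefinitionCommand({
    jobDefinitionName: "eks-pvc-example", // placeholder
    type: "container",
    eksProperties: {
      podProperties: {
        containers: [
          {
            name: "main",
            image: "public.ecr.aws/amazonlinux/amazonlinux:2023", // placeholder image
            command: ["ls", "-l", "/data"],
            resources: {
              requests: { cpu: "1", memory: "2048Mi" },
            },
            volumeMounts: [
              // Mount only the "jobs" directory of the claim-backed volume.
              { name: "shared-data", mountPath: "/data", subPath: "jobs", readOnly: false },
            ],
          },
        ],
        volumes: [
          {
            name: "shared-data",
            persistentVolumeClaim: { claimName: "my-existing-claim", readOnly: false }, // placeholder claim
          },
        ],
      },
    },
  })
);
```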

    " } }, "com.amazonaws.batch#EksPodProperties": { @@ -4517,7 +4566,7 @@ "metadata": { "target": "com.amazonaws.batch#EksMetadata", "traits": { - "smithy.api#documentation": "

    Metadata about the\n Kubernetes\n pod. For\n more information, see Understanding Kubernetes Objects in the Kubernetes\n documentation.

    " + "smithy.api#documentation": "

    Metadata about the Kubernetes pod. For more information, see Understanding Kubernetes Objects in the Kubernetes\n documentation.

    " } }, "shareProcessNamespace": { @@ -4623,7 +4672,7 @@ "metadata": { "target": "com.amazonaws.batch#EksMetadata", "traits": { - "smithy.api#documentation": "

    Metadata about the\n overrides for the container that's used on the Amazon EKS pod.

    " + "smithy.api#documentation": "

    Metadata about the overrides for the container that's used on the Amazon EKS pod.

    " } } }, @@ -4732,6 +4781,12 @@ "traits": { "smithy.api#documentation": "

    Specifies the configuration of a Kubernetes secret volume. For more information, see\n secret in the\n Kubernetes documentation.

    " } + }, + "persistentVolumeClaim": { + "target": "com.amazonaws.batch#EksPersistentVolumeClaim", + "traits": { + "smithy.api#documentation": "

      Specifies the configuration of a Kubernetes persistentVolumeClaim bound to a persistentVolume. For more information, see Persistent Volume Claims in the Kubernetes documentation.

    " + } } }, "traits": { @@ -4772,7 +4827,7 @@ "onStatusReason": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

    Contains a glob pattern to match against the StatusReason returned for a job.\n The pattern can contain up to 512 characters. It can contain letters, numbers, periods (.),\n colons (:), and white spaces (including spaces or tabs).\n It can\n optionally end with an asterisk (*) so that only the start of the string needs to be an exact\n match.

    " + "smithy.api#documentation": "

    Contains a glob pattern to match against the StatusReason returned for a job.\n The pattern can contain up to 512 characters. It can contain letters, numbers, periods (.),\n colons (:), and white spaces (including spaces or tabs). It can optionally end with an asterisk (*) \n so that only the start of the string needs to be an exact match.
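      A brief sketch of how such a glob might be used in a retry strategy; the "Host EC2*" prefix
      is only an example of the pattern syntax, not an authoritative status reason:

```ts
import type { RetryStrategy } from "@aws-sdk/client-batch";

// Retry up to 3 times when the status reason starts with "Host EC2",
// and exit without retrying for anything else.
const retryStrategy: RetryStrategy = {
  attempts: 3,
  evaluateOnExit: [
    { onStatusReason: "Host EC2*", action: "RETRY" },
    { onReason: "*", action: "EXIT" },
  ],
};
```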

    " } }, "onReason": { @@ -6975,13 +7030,13 @@ "operatingSystemFamily": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

    The operating system for the compute environment.\n Valid values are:\n LINUX (default), WINDOWS_SERVER_2019_CORE,\n WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and\n WINDOWS_SERVER_2022_FULL.

    \n \n

    The following parameters can’t be set for Windows containers: linuxParameters,\n privileged, user, ulimits,\n readonlyRootFilesystem,\n and efsVolumeConfiguration.

    \n
    \n \n

    The Batch Scheduler checks\n the compute environments\n that are attached to the job queue before registering a task definition with\n Fargate. In this\n scenario, the job queue is where the job is submitted. If the job requires a\n Windows container and the first compute environment is LINUX, the compute\n environment is skipped and the next compute environment is checked until a Windows-based compute\n environment is found.

    \n
    \n \n

    Fargate Spot is not supported for\n ARM64 and\n Windows-based containers on Fargate. A job queue will be blocked if a\n Fargate\n ARM64 or\n Windows job is submitted to a job queue with only Fargate Spot compute environments.\n However, you can attach both FARGATE and\n FARGATE_SPOT compute environments to the same job queue.

    \n
    " + "smithy.api#documentation": "

    The operating system for the compute environment. Valid values are:\n LINUX (default), WINDOWS_SERVER_2019_CORE,\n WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and\n WINDOWS_SERVER_2022_FULL.

    \n \n

    The following parameters can’t be set for Windows containers: linuxParameters,\n privileged, user, ulimits,\n readonlyRootFilesystem, and efsVolumeConfiguration.

    \n
    \n \n

    The Batch Scheduler checks the compute environments that are attached to the job queue before \n registering a task definition with Fargate. In this scenario, the job queue is where the job is \n submitted. If the job requires a Windows container and the first compute environment is LINUX, \n the compute environment is skipped and the next compute environment is checked until a Windows-based \n compute environment is found.

    \n
    \n \n

    Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. \n A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job \n queue with only Fargate Spot compute environments. However, you can attach both FARGATE and\n FARGATE_SPOT compute environments to the same job queue.

    \n
    " } }, "cpuArchitecture": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

    The vCPU architecture. The default value is X86_64. Valid values are\n X86_64 and ARM64.

    \n \n

    This parameter must be set to\n X86_64\n for Windows containers.

    \n
    \n \n

    Fargate Spot is not supported for ARM64 and Windows-based containers on\n Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is\n submitted to a job queue with only Fargate Spot compute environments. However, you can attach\n both FARGATE and FARGATE_SPOT compute environments to the same job\n queue.

    \n
    " + "smithy.api#documentation": "

    The vCPU architecture. The default value is X86_64. Valid values are\n X86_64 and ARM64.

    \n \n

    This parameter must be set to X86_64 for Windows containers.

    \n
    \n \n

    Fargate Spot is not supported for ARM64 and Windows-based containers on\n Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is\n submitted to a job queue with only Fargate Spot compute environments. However, you can attach\n both FARGATE and FARGATE_SPOT compute environments to the same job\n queue.

    \n
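      A hedged sketch combining both runtimePlatform fields for a Fargate job definition that
      runs Linux on ARM64; the image, execution role ARN, and resource sizes are placeholders:

```ts
import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({ region: "us-east-1" });

// Per the note above, an ARM64 Fargate job should not target a job queue
// that only has Fargate Spot compute environments.
await client.send(
  new RegisterJobDefinitionCommand({
    jobDefinitionName: "fargate-arm64-example", // placeholder
    type: "container",
    platformCapabilities: ["FARGATE"],
    containerProperties: {
      image: "public.ecr.aws/amazonlinux/amazonlinux:2023", // placeholder image
      executionRoleArn: "arn:aws:iam::123456789012:role/ecsTaskExecutionRole", // placeholder role
      resourceRequirements: [
        { type: "VCPU", value: "1" },
        { type: "MEMORY", value: "2048" },
      ],
      runtimePlatform: {
        operatingSystemFamily: "LINUX",
        cpuArchitecture: "ARM64",
      },
      networkConfiguration: { assignPublicIp: "ENABLED" },
    },
  })
);
```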
    " } } }, @@ -7201,7 +7256,7 @@ "schedulingPriorityOverride": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

    The scheduling priority for the job. This only affects jobs in job queues with a fair\n share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority.\n This\n overrides any scheduling priority in the job definition and works only within a single share\n identifier.

    \n

    The minimum supported value is 0 and the maximum supported value is 9999.

    " + "smithy.api#documentation": "

    The scheduling priority for the job. This only affects jobs in job queues with a fair\n share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority. This overrides any scheduling priority in the job definition and works only \n within a single share identifier.

    \n

    The minimum supported value is 0 and the maximum supported value is 9999.
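      To make the interaction concrete, a sketch of a submission into a fair-share queue; the
      queue, job definition, and share identifier are placeholders, and 7500 simply needs to fall
      in the 0-9999 range noted above:

```ts
import { BatchClient, SubmitJobCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({ region: "us-east-1" });

// Only meaningful when "fair-share-queue" uses a fair share scheduling policy;
// the override applies within the "analytics" share identifier only.
await client.send(
  new SubmitJobCommand({
    jobName: "priority-override-example",
    jobQueue: "fair-share-queue",       // placeholder queue
    jobDefinition: "my-job-definition", // placeholder definition
    shareIdentifier: "analytics",       // placeholder share identifier
    schedulingPriorityOverride: 7500,
  })
);
```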

    " } }, "arrayProperties": {