diff --git a/.changes/2.1585.0.json b/.changes/2.1585.0.json new file mode 100644 index 0000000000..80fce78c19 --- /dev/null +++ b/.changes/2.1585.0.json @@ -0,0 +1,32 @@ +[ + { + "type": "feature", + "category": "CodeBuild", + "description": "Supporting GitLab and GitLab Self Managed as source types in AWS CodeBuild." + }, + { + "type": "feature", + "category": "EC2", + "description": "Added support for ModifyInstanceMetadataDefaults and GetInstanceMetadataDefaults to set Instance Metadata Service account defaults" + }, + { + "type": "feature", + "category": "EMRcontainers", + "description": "This release increases the number of supported job template parameters from 20 to 100." + }, + { + "type": "feature", + "category": "GlobalAccelerator", + "description": "AWS Global Accelerator now supports cross-account sharing for bring your own IP addresses." + }, + { + "type": "feature", + "category": "MediaLive", + "description": "Exposing TileMedia H265 options" + }, + { + "type": "feature", + "category": "SageMaker", + "description": "Introduced support for the following new instance types on SageMaker Studio for JupyterLab and CodeEditor applications: m6i, m6id, m7i, c6i, c6id, c7i, r6i, r6id, r7i, and p5" + } +] \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index d0c209ebef..21abb021c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,15 @@ # Changelog for AWS SDK for JavaScript - + +## 2.1585.0 +* feature: CodeBuild: Supporting GitLab and GitLab Self Managed as source types in AWS CodeBuild. +* feature: EC2: Added support for ModifyInstanceMetadataDefaults and GetInstanceMetadataDefaults to set Instance Metadata Service account defaults +* feature: EMRcontainers: This release increases the number of supported job template parameters from 20 to 100. +* feature: GlobalAccelerator: AWS Global Accelerator now supports cross-account sharing for bring your own IP addresses. 
+* feature: MediaLive: Exposing TileMedia H265 options +* feature: SageMaker: Introduced support for the following new instance types on SageMaker Studio for JupyterLab and CodeEditor applications: m6i, m6id, m7i, c6i, c6id, c7i, r6i, r6id, r7i, and p5 + ## 2.1584.0 * feature: Kendra: Documentation update, March 2024. Corrects some docs for Amazon Kendra. * feature: Pricing: Add ResourceNotFoundException to ListPriceLists and GetPriceListFileUrl APIs diff --git a/README.md b/README.md index c8c0ef0255..7dd2639723 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ require('aws-sdk/lib/maintenance_mode_message').suppress = true; To use the SDK in the browser, simply add the following script tag to your HTML pages: - + You can also build a custom browser SDK with your specified set of AWS services. This can allow you to reduce the SDK's size, specify different API versions of diff --git a/apis/codebuild-2016-10-06.min.json b/apis/codebuild-2016-10-06.min.json index bb6b25d2ea..45e8899fda 100644 --- a/apis/codebuild-2016-10-06.min.json +++ b/apis/codebuild-2016-10-06.min.json @@ -1077,7 +1077,8 @@ "members": { "arn": {}, "serverType": {}, - "authType": {} + "authType": {}, + "resource": {} } } } diff --git a/apis/codebuild-2016-10-06.normal.json b/apis/codebuild-2016-10-06.normal.json index 644e11e0dd..f4d8ace843 100644 --- a/apis/codebuild-2016-10-06.normal.json +++ b/apis/codebuild-2016-10-06.normal.json @@ -1097,7 +1097,8 @@ "enum": [ "OAUTH", "BASIC_AUTH", - "PERSONAL_ACCESS_TOKEN" + "PERSONAL_ACCESS_TOKEN", + "CODECONNECTIONS" ] }, "BatchDeleteBuildsInput": { @@ -2733,7 +2734,7 @@ "members": { "statusCode": { "shape": "FleetStatusCode", - "documentation": "
The status code of the compute fleet. Valid values include:
CREATING
: The compute fleet is being created.
UPDATING
: The compute fleet is being updated.
ROTATING
: The compute fleet is being rotated.
DELETING
: The compute fleet is being deleted.
CREATE_FAILED
: The compute fleet has failed to create.
UPDATE_ROLLBACK_FAILED
: The compute fleet has failed to update and could not rollback to previous state.
ACTIVE
: The compute fleet has succeeded and is active.
The status code of the compute fleet. Valid values include:
CREATING
: The compute fleet is being created.
UPDATING
: The compute fleet is being updated.
ROTATING
: The compute fleet is being rotated.
PENDING_DELETION
: The compute fleet is pending deletion.
DELETING
: The compute fleet is being deleted.
CREATE_FAILED
: The compute fleet has failed to create.
UPDATE_ROLLBACK_FAILED
: The compute fleet has failed to update and could not rollback to previous state.
ACTIVE
: The compute fleet has succeeded and is active.
The type of repository that contains the source code to be built. Valid values include:
BITBUCKET
: The source code is in a Bitbucket repository.
CODECOMMIT
: The source code is in an CodeCommit repository.
CODEPIPELINE
: The source code settings are specified in the source action of a pipeline in CodePipeline.
GITHUB
: The source code is in a GitHub or GitHub Enterprise Cloud repository.
GITHUB_ENTERPRISE
: The source code is in a GitHub Enterprise Server repository.
NO_SOURCE
: The project does not have input source code.
S3
: The source code is in an Amazon S3 bucket.
The type of repository that contains the source code to be built. Valid values include:
BITBUCKET
: The source code is in a Bitbucket repository.
CODECOMMIT
: The source code is in an CodeCommit repository.
CODEPIPELINE
: The source code settings are specified in the source action of a pipeline in CodePipeline.
GITHUB
: The source code is in a GitHub repository.
GITHUB_ENTERPRISE
: The source code is in a GitHub Enterprise Server repository.
GITLAB
: The source code is in a GitLab repository.
GITLAB_SELF_MANAGED
: The source code is in a self-managed GitLab repository.
NO_SOURCE
: The project does not have input source code.
S3
: The source code is in an Amazon S3 bucket.
Information about the location of the source code to be built. Valid values include:
For source code settings that are specified in the source action of a pipeline in CodePipeline, location
should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.
For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>
).
For source code in an Amazon S3 input bucket, one of the following.
The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip
).
The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/
).
For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitHub account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source
object, set the auth
object's type
value to OAUTH
.
For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source
object, set the auth
object's type
value to OAUTH
.
If you specify CODEPIPELINE
for the Type
property, don't specify this property. For all of the other types, you must specify Location
.
Information about the location of the source code to be built. Valid values include:
For source code settings that are specified in the source action of a pipeline in CodePipeline, location
should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.
For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>
).
For source code in an Amazon S3 input bucket, one of the following.
The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip
).
The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/
).
For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitHub account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source
object, set the auth
object's type
value to OAUTH
.
For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitLab account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections Authorize application page, choose Authorize. Then on the CodeStar Connections Create GitLab connection page, choose Connect to GitLab. (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to override the default connection and use this connection instead, set the auth
object's type
value to CODECONNECTIONS
in the source
object.
For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source
object, set the auth
object's type
value to OAUTH
.
If you specify CODEPIPELINE
for the Type
property, don't specify this property. For all of the other types, you must specify Location
.
Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect.
" + "documentation": " Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an invalidInputException
is thrown.
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.
The status of a build triggered by a webhook is always reported to your source provider.
If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect.
" }, "buildStatusConfig": { "shape": "BuildStatusConfig", @@ -3885,7 +3886,7 @@ }, "sourceVersion": { "shape": "String", - "documentation": "The source version for the corresponding source identifier. If specified, must be one of:
For CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
" + "documentation": "The source version for the corresponding source identifier. If specified, must be one of:
For CodeCommit: the commit ID, branch, or Git tag to use.
For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID
(for example, pr/25
). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.
" } }, "documentation": "A source identifier and its corresponding version.
" @@ -4415,7 +4416,9 @@ "enum": [ "GITHUB", "BITBUCKET", - "GITHUB_ENTERPRISE" + "GITHUB_ENTERPRISE", + "GITLAB", + "GITLAB_SELF_MANAGED" ] }, "SharedResourceSortByType": { @@ -4440,7 +4443,7 @@ "members": { "type": { "shape": "SourceAuthType", - "documentation": "This data type is deprecated and is no longer accurate or used.
The authorization type to use. The only valid value is OAUTH
, which represents the OAuth authorization type.
The authorization type to use. Valid options are OAUTH or CODECONNECTIONS.
" }, "resource": { "shape": "String", @@ -4452,7 +4455,8 @@ "SourceAuthType": { "type": "string", "enum": [ - "OAUTH" + "OAUTH", + "CODECONNECTIONS" ] }, "SourceCredentialsInfo": { @@ -4464,14 +4468,18 @@ }, "serverType": { "shape": "ServerType", - "documentation": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, or BITBUCKET.
" + "documentation": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET.
" }, "authType": { "shape": "AuthType", - "documentation": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, or PERSONAL_ACCESS_TOKEN.
" + "documentation": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.
" + }, + "resource": { + "shape": "String", + "documentation": "The connection ARN if your serverType type is GITLAB or GITLAB_SELF_MANAGED and your authType is CODECONNECTIONS.
" } }, - "documentation": "Information about the credentials for a GitHub, GitHub Enterprise, or Bitbucket repository.
" + "documentation": "Information about the credentials for a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository.
" }, "SourceCredentialsInfos": { "type": "list", @@ -4485,6 +4493,8 @@ "CODECOMMIT", "CODEPIPELINE", "GITHUB", + "GITLAB", + "GITLAB_SELF_MANAGED", "S3", "BITBUCKET", "GITHUB_ENTERPRISE", diff --git a/apis/ec2-2016-11-15.min.json b/apis/ec2-2016-11-15.min.json index 1a01e8a971..e29e34c669 100644 --- a/apis/ec2-2016-11-15.min.json +++ b/apis/ec2-2016-11-15.min.json @@ -18466,6 +18466,40 @@ } } }, + "GetInstanceMetadataDefaults": { + "input": { + "type": "structure", + "members": { + "DryRun": { + "type": "boolean" + } + } + }, + "output": { + "type": "structure", + "members": { + "AccountLevel": { + "locationName": "accountLevel", + "type": "structure", + "members": { + "HttpTokens": { + "locationName": "httpTokens" + }, + "HttpPutResponseHopLimit": { + "locationName": "httpPutResponseHopLimit", + "type": "integer" + }, + "HttpEndpoint": { + "locationName": "httpEndpoint" + }, + "InstanceMetadataTags": { + "locationName": "instanceMetadataTags" + } + } + } + } + } + }, "GetInstanceTypesFromInstanceRequirements": { "input": { "type": "structure", @@ -18479,11 +18513,11 @@ "type": "boolean" }, "ArchitectureTypes": { - "shape": "S208", + "shape": "S20c", "locationName": "ArchitectureType" }, "VirtualizationTypes": { - "shape": "S209", + "shape": "S20d", "locationName": "VirtualizationType" }, "InstanceRequirements": { @@ -19016,7 +19050,7 @@ "locationName": "ipamResourceCidrSet", "type": "list", "member": { - "shape": "S21l", + "shape": "S21p", "locationName": "item" } } @@ -19281,7 +19315,7 @@ "locationName": "paymentDue" }, "ReservedInstanceValueRollup": { - "shape": "S22a", + "shape": "S22e", "locationName": "reservedInstanceValueRollup" }, "ReservedInstanceValueSet": { @@ -19292,7 +19326,7 @@ "type": "structure", "members": { "ReservationValue": { - "shape": "S22a", + "shape": "S22e", "locationName": "reservationValue" }, "ReservedInstanceId": { @@ -19302,7 +19336,7 @@ } }, "TargetConfigurationValueRollup": { - "shape": "S22a", + "shape": "S22e", "locationName": 
"targetConfigurationValueRollup" }, "TargetConfigurationValueSet": { @@ -19313,7 +19347,7 @@ "type": "structure", "members": { "ReservationValue": { - "shape": "S22a", + "shape": "S22e", "locationName": "reservationValue" }, "TargetConfiguration": { @@ -19462,11 +19496,11 @@ "type": "structure", "members": { "ArchitectureTypes": { - "shape": "S208", + "shape": "S20c", "locationName": "ArchitectureType" }, "VirtualizationTypes": { - "shape": "S209", + "shape": "S20d", "locationName": "VirtualizationType" }, "InstanceRequirements": { @@ -19537,11 +19571,11 @@ "type": "structure", "members": { "SubnetIpv4CidrReservations": { - "shape": "S231", + "shape": "S235", "locationName": "subnetIpv4CidrReservationSet" }, "SubnetIpv6CidrReservations": { - "shape": "S231", + "shape": "S235", "locationName": "subnetIpv6CidrReservationSet" }, "NextToken": { @@ -20118,7 +20152,7 @@ "members": { "Architecture": {}, "ClientData": { - "shape": "S24d" + "shape": "S24h" }, "ClientToken": {}, "Description": {}, @@ -20137,7 +20171,7 @@ "shape": "S18v" }, "UserBucket": { - "shape": "S24g" + "shape": "S24k" } } } @@ -20247,10 +20281,10 @@ "members": { "Description": {}, "Image": { - "shape": "S24n" + "shape": "S24r" }, "Volume": { - "shape": "S24o" + "shape": "S24s" } } } @@ -20373,7 +20407,7 @@ "type": "structure", "members": { "ClientData": { - "shape": "S24d" + "shape": "S24h" }, "ClientToken": {}, "Description": {}, @@ -20386,7 +20420,7 @@ "shape": "S18v" }, "UserBucket": { - "shape": "S24g" + "shape": "S24k" } } }, @@ -20444,11 +20478,11 @@ "type": "boolean" }, "Image": { - "shape": "S24n", + "shape": "S24r", "locationName": "image" }, "Volume": { - "shape": "S24o", + "shape": "S24s", "locationName": "volume" } } @@ -20897,25 +20931,25 @@ "Attribute": {}, "OperationType": {}, "UserIds": { - "shape": "S25y", + "shape": "S262", "locationName": "UserId" }, "UserGroups": { - "shape": "S25z", + "shape": "S263", "locationName": "UserGroup" }, "ProductCodes": { - "shape": "S260", + "shape": 
"S264", "locationName": "ProductCode" }, "LoadPermission": { "type": "structure", "members": { "Add": { - "shape": "S262" + "shape": "S266" }, "Remove": { - "shape": "S262" + "shape": "S266" } } }, @@ -20961,7 +20995,7 @@ "locationName": "successful" }, "Unsuccessful": { - "shape": "S267", + "shape": "S26b", "locationName": "unsuccessful" } } @@ -21029,15 +21063,15 @@ }, "OperationType": {}, "ProductCodes": { - "shape": "S260", + "shape": "S264", "locationName": "ProductCode" }, "UserGroups": { - "shape": "S25z", + "shape": "S263", "locationName": "UserGroup" }, "UserIds": { - "shape": "S25y", + "shape": "S262", "locationName": "UserId" }, "Value": {}, @@ -21182,7 +21216,7 @@ "members": { "InstanceId": {}, "CapacityReservationSpecification": { - "shape": "S26k" + "shape": "S26o" }, "DryRun": { "type": "boolean" @@ -21355,6 +21389,31 @@ } } }, + "ModifyInstanceMetadataDefaults": { + "input": { + "type": "structure", + "members": { + "HttpTokens": {}, + "HttpPutResponseHopLimit": { + "type": "integer" + }, + "HttpEndpoint": {}, + "InstanceMetadataTags": {}, + "DryRun": { + "type": "boolean" + } + } + }, + "output": { + "type": "structure", + "members": { + "Return": { + "locationName": "return", + "type": "boolean" + } + } + } + }, "ModifyInstanceMetadataOptions": { "input": { "type": "structure", @@ -21442,7 +21501,7 @@ "locationName": "AddOperatingRegion" }, "RemoveOperatingRegions": { - "shape": "S279", + "shape": "S27i", "locationName": "RemoveOperatingRegion" }, "Tier": {} @@ -21533,7 +21592,7 @@ "type": "structure", "members": { "IpamResourceCidr": { - "shape": "S21l", + "shape": "S21p", "locationName": "ipamResourceCidr" } } @@ -21556,7 +21615,7 @@ "locationName": "AddOperatingRegion" }, "RemoveOperatingRegions": { - "shape": "S279", + "shape": "S27i", "locationName": "RemoveOperatingRegion" } } @@ -21890,7 +21949,7 @@ "OperationType": {}, "SnapshotId": {}, "UserIds": { - "shape": "S25y", + "shape": "S262", "locationName": "UserId" }, "DryRun": { @@ -22847,13 
+22906,13 @@ ], "members": { "AccepterPeeringConnectionOptions": { - "shape": "S29y" + "shape": "S2a7" }, "DryRun": { "type": "boolean" }, "RequesterPeeringConnectionOptions": { - "shape": "S29y" + "shape": "S2a7" }, "VpcPeeringConnectionId": {} } @@ -22862,11 +22921,11 @@ "type": "structure", "members": { "AccepterPeeringConnectionOptions": { - "shape": "S2a0", + "shape": "S2a9", "locationName": "accepterPeeringConnectionOptions" }, "RequesterPeeringConnectionOptions": { - "shape": "S2a0", + "shape": "S2a9", "locationName": "requesterPeeringConnectionOptions" } } @@ -23090,7 +23149,7 @@ "type": "structure", "members": { "InstanceMonitorings": { - "shape": "S2af", + "shape": "S2ao", "locationName": "instancesSet" } } @@ -23826,7 +23885,7 @@ "locationName": "successful" }, "Unsuccessful": { - "shape": "S267", + "shape": "S26b", "locationName": "unsuccessful" } } @@ -24895,7 +24954,7 @@ } }, "CapacityReservationSpecification": { - "shape": "S26k" + "shape": "S26o" }, "HibernationOptions": { "type": "structure", @@ -25061,7 +25120,7 @@ "type": "integer" }, "Groups": { - "shape": "S2e9", + "shape": "S2ei", "locationName": "Group" }, "Ipv6AddressCount": { @@ -25110,7 +25169,7 @@ }, "RamdiskId": {}, "SecurityGroupIds": { - "shape": "S2e9", + "shape": "S2ei", "locationName": "SecurityGroupId" }, "SubnetId": {}, @@ -25324,7 +25383,7 @@ "type": "structure", "members": { "StartingInstances": { - "shape": "S2ex", + "shape": "S2f6", "locationName": "instancesSet" } } @@ -25451,7 +25510,7 @@ "type": "structure", "members": { "StoppingInstances": { - "shape": "S2ex", + "shape": "S2f6", "locationName": "instancesSet" } } @@ -25526,7 +25585,7 @@ "type": "structure", "members": { "TerminatingInstances": { - "shape": "S2ex", + "shape": "S2f6", "locationName": "instancesSet" } } @@ -25667,7 +25726,7 @@ "type": "structure", "members": { "InstanceMonitorings": { - "shape": "S2af", + "shape": "S2ao", "locationName": "instancesSet" } } @@ -25686,7 +25745,7 @@ "shape": "S6x" }, 
"SecurityGroupRuleDescriptions": { - "shape": "S2fn", + "shape": "S2fw", "locationName": "SecurityGroupRuleDescription" } } @@ -25714,7 +25773,7 @@ "shape": "S6x" }, "SecurityGroupRuleDescriptions": { - "shape": "S2fn", + "shape": "S2fw", "locationName": "SecurityGroupRuleDescription" } } @@ -35603,19 +35662,19 @@ } } }, - "S208": { + "S20c": { "type": "list", "member": { "locationName": "item" } }, - "S209": { + "S20d": { "type": "list", "member": { "locationName": "item" } }, - "S21l": { + "S21p": { "type": "structure", "members": { "IpamId": { @@ -35667,7 +35726,7 @@ } } }, - "S22a": { + "S22e": { "type": "structure", "members": { "HourlyPrice": { @@ -35681,14 +35740,14 @@ } } }, - "S231": { + "S235": { "type": "list", "member": { "shape": "So6", "locationName": "item" } }, - "S24d": { + "S24h": { "type": "structure", "members": { "Comment": {}, @@ -35703,14 +35762,14 @@ } } }, - "S24g": { + "S24k": { "type": "structure", "members": { "S3Bucket": {}, "S3Key": {} } }, - "S24n": { + "S24r": { "type": "structure", "required": [ "Bytes", @@ -35731,7 +35790,7 @@ } } }, - "S24o": { + "S24s": { "type": "structure", "required": [ "Size" @@ -35743,25 +35802,25 @@ } } }, - "S25y": { + "S262": { "type": "list", "member": { "locationName": "UserId" } }, - "S25z": { + "S263": { "type": "list", "member": { "locationName": "UserGroup" } }, - "S260": { + "S264": { "type": "list", "member": { "locationName": "ProductCode" } }, - "S262": { + "S266": { "type": "list", "member": { "locationName": "item", @@ -35772,14 +35831,14 @@ } } }, - "S267": { + "S26b": { "type": "list", "member": { "shape": "S1i", "locationName": "item" } }, - "S26k": { + "S26o": { "type": "structure", "members": { "CapacityReservationPreference": {}, @@ -35788,7 +35847,7 @@ } } }, - "S279": { + "S27i": { "type": "list", "member": { "type": "structure", @@ -35797,7 +35856,7 @@ } } }, - "S29y": { + "S2a7": { "type": "structure", "members": { "AllowDnsResolutionFromRemoteVpc": { @@ -35811,7 +35870,7 @@ } } }, - 
"S2a0": { + "S2a9": { "type": "structure", "members": { "AllowDnsResolutionFromRemoteVpc": { @@ -35828,7 +35887,7 @@ } } }, - "S2af": { + "S2ao": { "type": "list", "member": { "locationName": "item", @@ -35844,13 +35903,13 @@ } } }, - "S2e9": { + "S2ei": { "type": "list", "member": { "locationName": "SecurityGroupId" } }, - "S2ex": { + "S2f6": { "type": "list", "member": { "locationName": "item", @@ -35870,7 +35929,7 @@ } } }, - "S2fn": { + "S2fw": { "type": "list", "member": { "locationName": "item", diff --git a/apis/ec2-2016-11-15.normal.json b/apis/ec2-2016-11-15.normal.json index de87e13ae1..aee7c3ab19 100644 --- a/apis/ec2-2016-11-15.normal.json +++ b/apis/ec2-2016-11-15.normal.json @@ -6067,6 +6067,20 @@ }, "documentation": "Gets the current state of block public access for AMIs at the account level in the specified Amazon Web Services Region.
For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.
" }, + "GetInstanceMetadataDefaults": { + "name": "GetInstanceMetadataDefaults", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "GetInstanceMetadataDefaultsRequest" + }, + "output": { + "shape": "GetInstanceMetadataDefaultsResult" + }, + "documentation": "Gets the default instance metadata service (IMDS) settings that are set at the account level in the specified Amazon Web Services Region.
For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.
" + }, "GetInstanceTypesFromInstanceRequirements": { "name": "GetInstanceTypesFromInstanceRequirements", "http": { @@ -6909,6 +6923,20 @@ }, "documentation": "Modifies the recovery behavior of your instance to disable simplified automatic recovery or set the recovery behavior to default. The default configuration will not enable simplified automatic recovery for an unsupported instance type. For more information, see Simplified automatic recovery.
" }, + "ModifyInstanceMetadataDefaults": { + "name": "ModifyInstanceMetadataDefaults", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "ModifyInstanceMetadataDefaultsRequest" + }, + "output": { + "shape": "ModifyInstanceMetadataDefaultsResult" + }, + "documentation": "Modifies the default instance metadata service (IMDS) settings at the account level in the specified Amazon Web Services Region.
To remove a parameter's account-level default setting, specify no-preference
. At instance launch, the value will come from the AMI, or from the launch parameter if specified. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The account-level default IMDS settings.
", + "locationName": "accountLevel" + } + } + }, "GetInstanceTypesFromInstanceRequirementsRequest": { "type": "structure", "required": [ @@ -35846,7 +35912,7 @@ }, "ImageOwnerAlias": { "shape": "String", - "documentation": "The Amazon Web Services account alias (for example, amazon
, self
) or the Amazon Web Services account ID of the AMI owner.
The owner alias (amazon
| aws-marketplace
).
Indicates whether IMDSv2 is required.
optional
– IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1.
required
– IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2.
The maximum number of hops that the metadata token can travel.
", + "locationName": "httpPutResponseHopLimit" + }, + "HttpEndpoint": { + "shape": "InstanceMetadataEndpointState", + "documentation": "Indicates whether the IMDS endpoint for an instance is enabled or disabled. When disabled, the instance metadata can't be accessed.
", + "locationName": "httpEndpoint" + }, + "InstanceMetadataTags": { + "shape": "InstanceMetadataTagsState", + "documentation": "Indicates whether access to instance tags from the instance metadata is enabled or disabled. For more information, see Work with instance tags using the instance metadata in the Amazon EC2 User Guide.
", + "locationName": "instanceMetadataTags" + } + }, + "documentation": "The default instance metadata service (IMDS) settings that were set at the account level in the specified Amazon Web Services Region.
" + }, "InstanceMetadataEndpointState": { "type": "string", "enum": [ @@ -38041,11 +38133,11 @@ "members": { "HttpTokens": { "shape": "HttpTokensState", - "documentation": "Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.
required
- IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Default: If the value of ImdsSupport
for the Amazon Machine Image (AMI) for your instance is v2.0
, the default is required
.
Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1.
required
- IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2.
Default:
If the value of ImdsSupport
for the Amazon Machine Image (AMI) for your instance is v2.0
and the account level default is set to no-preference
, the default is required
.
If the value of ImdsSupport
for the Amazon Machine Image (AMI) for your instance is v2.0
, but the account level default is set to V1 or V2
, the default is optional
.
The default value can also be affected by other combinations of parameters. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.
" }, "HttpPutResponseHopLimit": { "shape": "Integer", - "documentation": "The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.
Default: 1
Possible values: Integers from 1 to 64
" + "documentation": "The maximum number of hops that the metadata token can travel.
Possible values: Integers from 1 to 64
" }, "HttpEndpoint": { "shape": "InstanceMetadataEndpointState", @@ -38072,12 +38164,12 @@ }, "HttpTokens": { "shape": "HttpTokensState", - "documentation": "Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.
required
- IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1.
required
- IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2.
The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.
Default: 1
Possible values: Integers from 1
to 64
The maximum number of hops that the metadata token can travel.
Possible values: Integers from 1
to 64
Indicates whether IMDSv2 is required.
optional
– IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1.
required
– IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2.
The maximum number of hops that the metadata token can travel.
Minimum: 1
Maximum: 64
Enables or disables the IMDS endpoint on an instance. When disabled, the instance metadata can't be accessed.
" + }, + "InstanceMetadataTags": { + "shape": "DefaultInstanceMetadataTagsState", + "documentation": "Enables or disables access to an instance's tags from the instance metadata. For more information, see Work with instance tags using the instance metadata in the Amazon EC2 User Guide.
" + }, + "DryRun": { + "shape": "Boolean", + "documentation": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
If the request succeeds, the response returns true
. If the request fails, no response is returned, and instead an error message is returned.
Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.
required
- IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Default: If the value of ImdsSupport
for the Amazon Machine Image (AMI) for your instance is v2.0
, the default is required
.
Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.
required
- IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Default:
If the value of ImdsSupport
for the Amazon Machine Image (AMI) for your instance is v2.0
and the account level default is set to no-preference
, the default is required
.
If the value of ImdsSupport
for the Amazon Machine Image (AMI) for your instance is v2.0
, but the account level default is set to V1 or V2
, the default is optional
.
The default value can also be affected by other combinations of parameters. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.
" }, "HttpPutResponseHopLimit": { "shape": "Integer", @@ -55698,7 +55833,7 @@ }, "MaxResults": { "shape": "TransitGatewayMaxResults", - "documentation": "The maximum number of routes to return.
" + "documentation": "The maximum number of routes to return. If a value is not provided, the default is 1000.
" }, "DryRun": { "shape": "Boolean", diff --git a/apis/ecs-2014-11-13.normal.json b/apis/ecs-2014-11-13.normal.json index e4333f881d..88e526455c 100644 --- a/apis/ecs-2014-11-13.normal.json +++ b/apis/ecs-2014-11-13.normal.json @@ -113,7 +113,7 @@ "shape": "NamespaceNotFoundException" } ], - "documentation": "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING
state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING
state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state. This is while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.
" + "documentation": "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING
state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING
state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state. This is while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
" }, "CreateTaskSet": { "name": "CreateTaskSet", @@ -162,7 +162,7 @@ "shape": "NamespaceNotFoundException" } ], - "documentation": "Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
For information about the maximum number of task sets and otther quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
" + "documentation": "Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
For information about the maximum number of task sets and other quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.
" }, "DeleteAccountSetting": { "name": "DeleteAccountSetting", @@ -1197,7 +1197,7 @@ "shape": "ConflictException" } ], - "documentation": "Starts a new task using the specified task definition.
You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.
To manage eventual consistency, you can do the following:
Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.
Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.
Starts a new task using the specified task definition.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.
To manage eventual consistency, you can do the following:
Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.
Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.
Starts a new task from the specified task definition on the specified container instance or instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
" + "documentation": "Starts a new task from the specified task definition on the specified container instance or instances.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
" }, "StopTask": { "name": "StopTask", @@ -1597,7 +1597,7 @@ "shape": "UnsupportedFeatureException" } ], - "documentation": "Modifies the parameters of a service.
For services using the rolling update (ECS
) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations
null
, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount
parameter.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest
), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment
option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent
and maximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during a deployment. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
and a 30-second timeout. After this, SIGKILL
is sent and the containers are forcibly stopped. If the container handles the SIGTERM
gracefully and exits within 30 seconds from receiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties:
loadBalancers
,
serviceRegistries
For more information about the role see the CreateService
request parameter role
.
Modifies the parameters of a service.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
For services using the rolling update (ECS
) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations
null
, it doesn't trigger a new deployment. For more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount
parameter.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest
), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment
option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent
and maximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during a deployment. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
and a 30-second timeout. After this, SIGKILL
is sent and the containers are forcibly stopped. If the container handles the SIGTERM
gracefully and exits within 30 seconds from receiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties:
loadBalancers
,
serviceRegistries
For more information about the role see the CreateService
request parameter role
.
Details of the attachment.
For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.
For Service Connect services, this includes portName
, clientAliases
, discoveryName
, and ingressPortOverride
.
For elastic block storage, this includes roleArn
, encrypted
, filesystemType
, iops
, kmsKeyId
, sizeInGiB
, snapshotId
, tagSpecifications
, throughput
, and volumeType
.
Details of the attachment.
For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.
For Service Connect services, this includes portName
, clientAliases
, discoveryName
, and ingressPortOverride
.
For Elastic Block Storage, this includes roleArn
, deleteOnTermination
, volumeName
, volumeId
, and statusReason
(only when the attachment fails to create or attach).
An object representing a container instance or task attachment.
" @@ -2798,7 +2798,7 @@ }, "launchType": { "shape": "LaunchType", - "documentation": "The infrastructure that you run your service on. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
The FARGATE
launch type runs your tasks on Fargate On-Demand infrastructure.
Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS User Guide for Fargate.
The EC2
launch type runs your tasks on Amazon EC2 instances registered to your cluster.
The EXTERNAL
launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.
A service can use either a launch type or a capacity provider strategy. If a launchType
is specified, the capacityProviderStrategy
parameter must be omitted.
The infrastructure that you run your service on. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
The FARGATE
launch type runs your tasks on Fargate On-Demand infrastructure.
Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS Developer Guide.
The EC2
launch type runs your tasks on Amazon EC2 instances registered to your cluster.
The EXTERNAL
launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.
A service can use either a launch type or a capacity provider strategy. If a launchType
is specified, the capacityProviderStrategy
parameter must be omitted.
Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.
The default is NONE
.
Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.
You must set this to a value other than NONE
when you use Cost Explorer. For more information, see Amazon ECS usage reports in the Amazon Elastic Container Service Developer Guide.
The default is NONE
.
The file type to use. The only supported value is s3
.
The file type to use. Environment files are objects in Amazon S3. The only supported value is s3
.
A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env
file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE
format. Lines beginning with #
are treated as comments and are ignored.
If there are environment variables specified using the environment
parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying environment variables in the Amazon Elastic Container Service Developer Guide.
You must use the following platforms for the Fargate launch type:
Linux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
Consider the following when using the Fargate launch type:
The file is handled like a native Docker env-file.
There is no support for shell escape handling.
The container entry point interprets the VARIABLE
values.
A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env
file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE
format. Lines beginning with #
are treated as comments and are ignored.
If there are environment variables specified using the environment
parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Use a file to pass environment variables to a container in the Amazon Elastic Container Service Developer Guide.
Environment variable files are objects in Amazon S3 and all Amazon S3 security considerations apply.
You must use the following platforms for the Fargate launch type:
Linux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
Consider the following when using the Fargate launch type:
The file is handled like a native Docker env-file.
There is no support for shell escape handling.
The container entry point interprets the VARIABLE
values.
The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21
GiB and the maximum supported value is 200
GiB.
The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 20
GiB and the maximum supported value is 200
GiB.
The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Using data volumes in tasks in the Amazon ECS Developer Guide.
For tasks using the Fargate launch type, the task requires the following platforms:
Linux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the startPeriod
is off.
If a health check succeeds within the startPeriod
, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.
An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK
parameter of docker run.
The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.
You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.
The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.
The following describes the possible healthStatus
values for a container:
HEALTHY
-The container health check has passed successfully.
UNHEALTHY
-The container health check has failed.
UNKNOWN
-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.
The following describes the possible healthStatus
values based on the container health checker status of essential containers in the task with the following priority order (high to low):
UNHEALTHY
-One or more essential containers have failed their health check.
UNKNOWN
-Any essential container running within the task is in an UNKNOWN
state and no other essential containers have an UNHEALTHY
state.
HEALTHY
-All essential containers within the task have passed their health checks.
Consider the following task health example with 2 containers.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
, the task health is HEALTHY
.
Consider the following task health example with 3 containers.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, and Container3 is UNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, and Container3 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is HEALTHY
, and Container3 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, and Container3 is HEALTHY
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, and Container3 is UNKNOWN
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
, and Container3 is HEALTHY
, the task health is HEALTHY
.
If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.
The following are notes about container health check support:
When the Amazon ECS agent cannot connect to the Amazon ECS service, the service reports the container as UNHEALTHY
.
The health check statuses are the \"last heard from\" response from the Amazon ECS agent. There are no assumptions made about the status of the container health checks.
Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.
Container health checks are supported for Fargate tasks if you're using platform version 1.1.0
or greater. For more information, see Fargate platform versions.
Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.
An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK
parameter of docker run.
The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.
You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.
The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.
Amazon ECS performs health checks on containers with the default that launched the container instance or the task.
The following describes the possible healthStatus
values for a container:
HEALTHY
-The container health check has passed successfully.
UNHEALTHY
-The container health check has failed.
UNKNOWN
-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.
The following describes the possible healthStatus
values based on the container health checker status of essential containers in the task with the following priority order (high to low):
UNHEALTHY
-One or more essential containers have failed their health check.
UNKNOWN
-Any essential container running within the task is in an UNKNOWN
state and no other essential containers have an UNHEALTHY
state.
HEALTHY
-All essential containers within the task have passed their health checks.
Consider the following task health example with 2 containers.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
, the task health is HEALTHY
.
Consider the following task health example with 3 containers.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, and Container3 is UNKNOWN
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is UNKNOWN
, and Container3 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is UNHEALTHY
and Container2 is HEALTHY
, and Container3 is HEALTHY
, the task health is UNHEALTHY
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, and Container3 is HEALTHY
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is UNKNOWN
, and Container3 is UNKNOWN
, the task health is UNKNOWN
.
If Container1 is HEALTHY
and Container2 is HEALTHY
, and Container3 is HEALTHY
, the task health is HEALTHY
.
If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.
The following are notes about container health check support:
When the Amazon ECS agent cannot connect to the Amazon ECS service, the service reports the container as UNHEALTHY
.
The health check statuses are the \"last heard from\" response from the Amazon ECS agent. There are no assumptions made about the status of the container health checks.
Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.
Container health checks are supported for Fargate tasks if you're using platform version 1.1.0
or greater. For more information, see Fargate platform versions.
Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.
The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop
in the Create a container section of the Docker Remote API and the --cap-drop
option to docker run.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker. For more information about the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information about these Linux capabilities, see the capabilities(7) Linux manual page.
" + "documentation": "The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more information about the default capabilities and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run reference. For more detailed information about these Linux capabilities, see the capabilities(7) Linux manual page.
" }, "KeyValuePair": { "type": "structure", diff --git a/apis/emr-containers-2020-10-01.normal.json b/apis/emr-containers-2020-10-01.normal.json index 2713447772..1392b036cc 100644 --- a/apis/emr-containers-2020-10-01.normal.json +++ b/apis/emr-containers-2020-10-01.normal.json @@ -2204,7 +2204,7 @@ "value": { "shape": "TemplateParameterConfiguration" }, - "max": 20 + "max": 100 }, "TemplateParameterDataType": { "type": "string", @@ -2221,7 +2221,7 @@ "value": { "shape": "String1024" }, - "max": 20 + "max": 100 }, "TemplateParameterName": { "type": "string", diff --git a/apis/globalaccelerator-2018-08-08.min.json b/apis/globalaccelerator-2018-08-08.min.json index de7b11b65d..511a733a4a 100644 --- a/apis/globalaccelerator-2018-08-08.min.json +++ b/apis/globalaccelerator-2018-08-08.min.json @@ -754,6 +754,7 @@ "type": "structure", "members": { "EndpointId": {}, + "Cidr": {}, "AttachmentArn": {} } } @@ -1496,11 +1497,9 @@ "type": "list", "member": { "type": "structure", - "required": [ - "EndpointId" - ], "members": { "EndpointId": {}, + "Cidr": {}, "Region": {} } } diff --git a/apis/globalaccelerator-2018-08-08.normal.json b/apis/globalaccelerator-2018-08-08.normal.json index ee1275899c..021a767dd8 100644 --- a/apis/globalaccelerator-2018-08-08.normal.json +++ b/apis/globalaccelerator-2018-08-08.normal.json @@ -191,7 +191,7 @@ "shape": "TransactionInProgressException" } ], - "documentation": "Create a cross-account attachment in Global Accelerator. You create a cross-account attachment to specify the principals who have permission to add to accelerators in their own account the resources in your account that you also list in the attachment.
A principal can be an Amazon Web Services account number or the Amazon Resource Name (ARN) for an accelerator. For account numbers that are listed as principals, to add a resource listed in the attachment to an accelerator, you must sign in to an account specified as a principal. Then you can add the resources that are listed to any of your accelerators. If an accelerator ARN is listed in the cross-account attachment as a principal, anyone with permission to make updates to the accelerator can add as endpoints resources that are listed in the attachment.
" + "documentation": "Create a cross-account attachment in Global Accelerator. You create a cross-account attachment to specify the principals who have permission to work with resources in accelerators in their own account. You specify, in the same attachment, the resources that are shared.
A principal can be an Amazon Web Services account number or the Amazon Resource Name (ARN) for an accelerator. For account numbers that are listed as principals, to work with a resource listed in the attachment, you must sign in to an account specified as a principal. Then, you can work with resources that are listed, with any of your accelerators. If an accelerator ARN is listed in the cross-account attachment as a principal, anyone with permission to make updates to the accelerator can work with resources that are listed in the attachment.
Specify each principal and resource separately. To specify two CIDR address pools, list them individually under Resources
, and so on. For a command line operation, for example, you might use a statement like the following:
\"Resources\": [{\"Cidr\": \"169.254.60.0/24\"},{\"Cidr\": \"169.254.59.0/24\"}]
For more information, see Working with cross-account attachments and resources in Global Accelerator in the Global Accelerator Developer Guide.
" }, "CreateCustomRoutingAccelerator": { "name": "CreateCustomRoutingAccelerator", @@ -414,7 +414,7 @@ "shape": "TransactionInProgressException" } ], - "documentation": "Delete a cross-account attachment. When you delete an attachment, Global Accelerator revokes the permission to use the resources in the attachment from all principals in the list of principals. Global Accelerator revokes the permission for specific resources by doing the following:
If the principal is an account ID, Global Accelerator reviews every accelerator in the account and removes cross-account endpoints from all accelerators.
If the principal is an accelerator, Global Accelerator reviews just that accelerator and removes cross-account endpoints from it.
If there are overlapping permissions provided by multiple cross-account attachments, Global Accelerator only removes endpoints if there are no current cross-account attachments that provide access permission. For example, if you delete a cross-account attachment that lists an accelerator as a principal, but another cross-account attachment includes the account ID that owns that accelerator, endpoints will not be removed from the accelerator.
" + "documentation": "Delete a cross-account attachment. When you delete an attachment, Global Accelerator revokes the permission to use the resources in the attachment from all principals in the list of principals. Global Accelerator revokes the permission for specific resources.
For more information, see Working with cross-account attachments and resources in Global Accelerator in the Global Accelerator Developer Guide.
" }, "DeleteCustomRoutingAccelerator": { "name": "DeleteCustomRoutingAccelerator", @@ -920,7 +920,7 @@ "shape": "InternalServiceErrorException" } ], - "documentation": "List the accounts that have cross-account endpoints.
" + "documentation": "List the accounts that have cross-account resources.
For more information, see Working with cross-account attachments and resources in Global Accelerator in the Global Accelerator Developer Guide.
" }, "ListCrossAccountResources": { "name": "ListCrossAccountResources", @@ -951,7 +951,7 @@ "shape": "AcceleratorNotFoundException" } ], - "documentation": "List the cross-account endpoints available to add to an accelerator.
" + "documentation": "List the cross-account resources available to work with.
" }, "ListCustomRoutingAccelerators": { "name": "ListCustomRoutingAccelerators", @@ -1402,7 +1402,7 @@ "shape": "TransactionInProgressException" } ], - "documentation": "Update a cross-account attachment to add or remove principals or resources. When you update an attachment to remove a principal (account ID or accelerator) or a resource, Global Accelerator revokes the permission for specific resources by doing the following:
If the principal is an account ID, Global Accelerator reviews every accelerator in the account and removes cross-account endpoints from all accelerators.
If the principal is an accelerator, Global Accelerator reviews just that accelerator and removes cross-account endpoints from it.
If there are overlapping permissions provided by multiple cross-account attachments, Global Accelerator only removes endpoints if there are no current cross-account attachments that provide access permission. For example, if you delete a cross-account attachment that lists an accelerator as a principal, but another cross-account attachment includes the account ID that owns that accelerator, endpoints will not be removed from the accelerator.
" + "documentation": "Update a cross-account attachment to add or remove principals or resources. When you update an attachment to remove a principal (account ID or accelerator) or a resource, Global Accelerator revokes the permission for specific resources.
For more information, see Working with cross-account attachments and resources in Global Accelerator in the Global Accelerator Developer Guide.
" }, "UpdateCustomRoutingAccelerator": { "name": "UpdateCustomRoutingAccelerator", @@ -1752,7 +1752,7 @@ "members": { "Cidr": { "shape": "GenericString", - "documentation": "The address range, in CIDR notation. This must be the exact range that you provisioned. You can't advertise only a portion of the provisioned range.
" + "documentation": "The address range, in CIDR notation. This must be the exact range that you provisioned. You can't advertise only a portion of the provisioned range.
For more information, see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide.
" } } }, @@ -1822,7 +1822,7 @@ "documentation": "The date and time that the cross-account attachment was created.
" } }, - "documentation": "A cross-account attachment in Global Accelerator. A cross-account attachment specifies the principals who have permission to add to accelerators in their own account the resources in your account that you also list in the attachment.
" + "documentation": "A cross-account attachment in Global Accelerator. A cross-account attachment specifies the principals who have permission to work with resources in your account, which you also list in the attachment.
" }, "AttachmentName": { "type": "string", @@ -1852,7 +1852,7 @@ "members": { "Cidr": { "shape": "GenericString", - "documentation": "The address range, in CIDR notation.
" + "documentation": "The address range, in CIDR notation.
For more information, see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide.
" }, "State": { "shape": "ByoipCidrState", @@ -1988,11 +1988,11 @@ }, "Principals": { "shape": "Principals", - "documentation": "The principals to list in the cross-account attachment. A principal can be an Amazon Web Services account number or the Amazon Resource Name (ARN) for an accelerator.
" + "documentation": "The principals to include in the cross-account attachment. A principal can be an Amazon Web Services account number or the Amazon Resource Name (ARN) for an accelerator.
" }, "Resources": { "shape": "Resources", - "documentation": "The Amazon Resource Names (ARNs) for the resources to list in the cross-account attachment. A resource can be any supported Amazon Web Services resource type for Global Accelerator.
" + "documentation": "The Amazon Resource Names (ARNs) for the resources to include in the cross-account attachment. A resource can be any supported Amazon Web Services resource type for Global Accelerator or a CIDR range for a bring your own IP address (BYOIP) address pool.
" }, "IdempotencyToken": { "shape": "IdempotencyToken", @@ -2001,7 +2001,7 @@ }, "Tags": { "shape": "Tags", - "documentation": "Create tags for cross-account attachment.
For more information, see Tagging in Global Accelerator in the Global Accelerator Developer Guide.
" + "documentation": "Add tags for a cross-account attachment.
For more information, see Tagging in Global Accelerator in the Global Accelerator Developer Guide.
" } } }, @@ -2238,12 +2238,16 @@ "shape": "GenericString", "documentation": "The endpoint ID for the endpoint that is listed in a cross-account attachment and can be added to an accelerator by specified principals.
" }, + "Cidr": { + "shape": "GenericString", + "documentation": "An IP address range, in CIDR format, that is specified as an Amazon Web Services resource. The address must be provisioned and advertised in Global Accelerator by following the bring your own IP address (BYOIP) process for Global Accelerator.
For more information, see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide.
" + }, "AttachmentArn": { "shape": "GenericString", - "documentation": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints to accelerators.
" + "documentation": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the resources (endpoints or CIDR range) that can be added to accelerators and principals that have permission to add them.
" } }, - "documentation": "An endpoint (Amazon Web Services resource) that is listed in a cross-account attachment and can be added to an accelerator by specified principals, that are also listed in the attachment.
" + "documentation": "An endpoint (Amazon Web Services resource) or an IP address range, in CIDR format, that is listed in a cross-account attachment. A cross-account resource can be added to an accelerator by specified principals, which are also listed in the attachment.
For more information, see Working with cross-account attachments and resources in Global Accelerator in the Global Accelerator Developer Guide.
" }, "CrossAccountResources": { "type": "list", @@ -2395,7 +2399,7 @@ }, "AttachmentArn": { "shape": "GenericString", - "documentation": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints to accelerators.
" + "documentation": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints.
" } }, "documentation": "The list of endpoint objects. For custom routing, this is a list of virtual private cloud (VPC) subnet IDs.
" @@ -2608,7 +2612,7 @@ "members": { "Cidr": { "shape": "GenericString", - "documentation": "The address range, in CIDR notation. The prefix must be the same prefix that you specified when you provisioned the address range.
" + "documentation": "The address range, in CIDR notation. The prefix must be the same prefix that you specified when you provisioned the address range.
For more information, see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide.
" } } }, @@ -2873,7 +2877,7 @@ "members": { "EndpointId": { "shape": "GenericString", - "documentation": "An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID. For Amazon EC2 instances, this is the EC2 instance ID. A resource must be valid and active when you add it as an endpoint.
An Application Load Balancer can be either internal or internet-facing.
" + "documentation": "An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID. For Amazon EC2 instances, this is the EC2 instance ID. A resource must be valid and active when you add it as an endpoint.
For cross-account endpoints, this must be the ARN of the resource.
" }, "Weight": { "shape": "EndpointWeight", @@ -2885,7 +2889,7 @@ }, "AttachmentArn": { "shape": "GenericString", - "documentation": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints to accelerators.
" + "documentation": "The Amazon Resource Name (ARN) of the cross-account attachment that specifies the endpoints (resources) that can be added to accelerators and principals that have permission to add the endpoints.
" } }, "documentation": "A complex type for endpoints. A resource must be valid and active when you add it as an endpoint.
" @@ -3199,7 +3203,7 @@ "members": { "ResourceOwnerAwsAccountIds": { "shape": "AwsAccountIds", - "documentation": "The account IDs of principals (resource owners) in a cross-account attachment who can add endpoints (resources) listed in the same attachment.
" + "documentation": "The account IDs of principals (resource owners) in a cross-account attachment who can work with resources listed in the same attachment.
" } } }, @@ -3219,7 +3223,7 @@ }, "MaxResults": { "shape": "MaxResults", - "documentation": "The number of cross-account endpoints objects that you want to return with this call. The default value is 10.
" + "documentation": "The number of cross-account resource objects that you want to return with this call. The default value is 10.
" }, "NextToken": { "shape": "GenericString", @@ -3232,7 +3236,7 @@ "members": { "CrossAccountResources": { "shape": "CrossAccountResources", - "documentation": "The endpoints attached to an accelerator in a cross-account attachment.
" + "documentation": "The cross-account resources used with an accelerator.
" }, "NextToken": { "shape": "GenericString", @@ -3650,7 +3654,7 @@ "members": { "Cidr": { "shape": "GenericString", - "documentation": "The public IPv4 address range, in CIDR notation. The most specific IP prefix that you can specify is /24. The address range cannot overlap with another address range that you've brought to this or another Region.
" + "documentation": "The public IPv4 address range, in CIDR notation. The most specific IP prefix that you can specify is /24. The address range cannot overlap with another address range that you've brought to this Amazon Web Services Region or another Region.
For more information, see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide.
" }, "CidrAuthorizationContext": { "shape": "CidrAuthorizationContext", @@ -3703,20 +3707,21 @@ }, "Resource": { "type": "structure", - "required": [ - "EndpointId" - ], "members": { "EndpointId": { "shape": "GenericString", - "documentation": "The endpoint ID for the endpoint (Amazon Web Services resource).
" + "documentation": "The endpoint ID for the endpoint that is specified as a Amazon Web Services resource.
An endpoint ID for the cross-account feature is the ARN of an Amazon Web Services resource, such as a Network Load Balancer, that Global Accelerator supports as an endpoint for an accelerator.
" + }, + "Cidr": { + "shape": "GenericString", + "documentation": "An IP address range, in CIDR format, that is specified as resource. The address must be provisioned and advertised in Global Accelerator by following the bring your own IP address (BYOIP) process for Global Accelerator
For more information, see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide.
" }, "Region": { "shape": "GenericString", - "documentation": "The Amazon Web Services Region where a resource is located.
" + "documentation": "The Amazon Web Services Region where a shared endpoint resource is located.
" } }, - "documentation": "An Amazon Web Services resource that is supported by Global Accelerator and can be added as an endpoint for an accelerator.
" + "documentation": "A resource is one of the following: the ARN for an Amazon Web Services resource that is supported by Global Accelerator to be added as an endpoint, or a CIDR range that specifies a bring your own IP (BYOIP) address pool.
" }, "ResourceArn": { "type": "string", @@ -3928,19 +3933,19 @@ }, "AddPrincipals": { "shape": "Principals", - "documentation": "The principals to add to the cross-account attachment. A principal is an account or the Amazon Resource Name (ARN) of an accelerator that the attachment gives permission to add the resources from another account, listed in the attachment.
To add more than one principal, separate the account numbers or accelerator ARNs, or both, with commas.
" + "documentation": "The principals to add to the cross-account attachment. A principal is an account or the Amazon Resource Name (ARN) of an accelerator that the attachment gives permission to work with resources from another account. The resources are also listed in the attachment.
To add more than one principal, separate the account numbers or accelerator ARNs, or both, with commas.
" }, "RemovePrincipals": { "shape": "Principals", - "documentation": "The principals to remove from the cross-account attachment. A principal is an account or the Amazon Resource Name (ARN) of an accelerator that is given permission to add the resources from another account, listed in the cross-account attachment.
To remove more than one principal, separate the account numbers or accelerator ARNs, or both, with commas.
" + "documentation": "The principals to remove from the cross-account attachment. A principal is an account or the Amazon Resource Name (ARN) of an accelerator that the attachment gives permission to work with resources from another account. The resources are also listed in the attachment.
To remove more than one principal, separate the account numbers or accelerator ARNs, or both, with commas.
" }, "AddResources": { "shape": "Resources", - "documentation": "The resources to add to the cross-account attachment. A resource listed in a cross-account attachment can be added to an accelerator by the principals that are listed in the attachment.
To add more than one resource, separate the resource ARNs with commas.
" + "documentation": "The resources to add to the cross-account attachment. A resource listed in a cross-account attachment can be used with an accelerator by the principals that are listed in the attachment.
To add more than one resource, separate the resource ARNs with commas.
" }, "RemoveResources": { "shape": "Resources", - "documentation": "The resources to remove from the cross-account attachment. A resource listed in a cross-account attachment can be added to an accelerator fy principals that are listed in the cross-account attachment.
To remove more than one resource, separate the resource ARNs with commas.
" + "documentation": "The resources to remove from the cross-account attachment. A resource listed in a cross-account attachment can be used with an accelerator by the principals that are listed in the attachment.
To remove more than one resource, separate the resource ARNs with commas.
" } } }, @@ -4139,7 +4144,7 @@ "members": { "Cidr": { "shape": "GenericString", - "documentation": "The address range, in CIDR notation.
" + "documentation": "The address range, in CIDR notation.
For more information, see Bring your own IP addresses (BYOIP) in the Global Accelerator Developer Guide.
" } } }, @@ -4148,7 +4153,7 @@ "members": { "ByoipCidr": { "shape": "ByoipCidr", - "documentation": "Information about the address pool.
" + "documentation": "Information about the BYOIP address pool.
" } } } diff --git a/apis/medialive-2017-10-14.min.json b/apis/medialive-2017-10-14.min.json index 1d331dc296..055aec9ccb 100644 --- a/apis/medialive-2017-10-14.min.json +++ b/apis/medialive-2017-10-14.min.json @@ -278,11 +278,11 @@ "locationName": "encoderSettings" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", "locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { @@ -315,7 +315,7 @@ "locationName": "roleArn" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Vpc": { @@ -345,7 +345,7 @@ "type": "structure", "members": { "Channel": { - "shape": "Sff", + "shape": "Sfl", "locationName": "channel" } } @@ -360,11 +360,11 @@ "type": "structure", "members": { "Destinations": { - "shape": "Sfo", + "shape": "Sfu", "locationName": "destinations" }, "InputDevices": { - "shape": "Sfq", + "shape": "Sfw", "locationName": "inputDevices" }, "InputSecurityGroups": { @@ -372,7 +372,7 @@ "locationName": "inputSecurityGroups" }, "MediaConnectFlows": { - "shape": "Sfs", + "shape": "Sfy", "locationName": "mediaConnectFlows" }, "Name": { @@ -386,11 +386,11 @@ "locationName": "roleArn" }, "Sources": { - "shape": "Sfu", + "shape": "Sg0", "locationName": "sources" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Type": { @@ -419,7 +419,7 @@ "type": "structure", "members": { "Input": { - "shape": "Sfz", + "shape": "Sg5", "locationName": "input" } } @@ -434,11 +434,11 @@ "type": "structure", "members": { "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "WhitelistRules": { - "shape": "Sgb", + "shape": "Sgh", "locationName": "whitelistRules" } } @@ -447,7 +447,7 @@ "type": "structure", "members": { "SecurityGroup": { - "shape": "Sge", + "shape": "Sgk", "locationName": "securityGroup" } } @@ -466,7 +466,7 @@ "locationName": "availabilityZones" }, "MultiplexSettings": { - "shape": "Sgj", + "shape": "Sgp", 
"locationName": "multiplexSettings" }, "Name": { @@ -477,7 +477,7 @@ "idempotencyToken": true }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } }, @@ -492,7 +492,7 @@ "type": "structure", "members": { "Multiplex": { - "shape": "Sgo", + "shape": "Sgu", "locationName": "multiplex" } } @@ -511,7 +511,7 @@ "locationName": "multiplexId" }, "MultiplexProgramSettings": { - "shape": "Sgu", + "shape": "Sh0", "locationName": "multiplexProgramSettings" }, "ProgramName": { @@ -533,7 +533,7 @@ "type": "structure", "members": { "MultiplexProgram": { - "shape": "Sh2", + "shape": "Sh8", "locationName": "multiplexProgram" } } @@ -556,7 +556,7 @@ "idempotencyToken": true }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } }, @@ -568,7 +568,7 @@ "type": "structure", "members": { "Input": { - "shape": "Sfz", + "shape": "Sg5", "locationName": "input" } } @@ -587,7 +587,7 @@ "locationName": "resource-arn" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } }, @@ -632,7 +632,7 @@ "locationName": "destinations" }, "EgressEndpoints": { - "shape": "Sfg", + "shape": "Sfm", "locationName": "egressEndpoints" }, "EncoderSettings": { @@ -643,25 +643,25 @@ "locationName": "id" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", "locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { "locationName": "logLevel" }, "Maintenance": { - "shape": "Sfi", + "shape": "Sfo", "locationName": "maintenance" }, "Name": { "locationName": "name" }, "PipelineDetails": { - "shape": "Sfj", + "shape": "Sfp", "locationName": "pipelineDetails" }, "PipelinesRunningCount": { @@ -675,11 +675,11 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Vpc": { - "shape": "Sfm", + "shape": "Sfs", "locationName": "vpc" } } @@ -760,14 +760,14 @@ "locationName": "availabilityZones" }, "Destinations": { - "shape": 
"Sgp", + "shape": "Sgv", "locationName": "destinations" }, "Id": { "locationName": "id" }, "MultiplexSettings": { - "shape": "Sgj", + "shape": "Sgp", "locationName": "multiplexSettings" }, "Name": { @@ -785,7 +785,7 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } } @@ -821,15 +821,15 @@ "locationName": "channelId" }, "MultiplexProgramSettings": { - "shape": "Sgu", + "shape": "Sh0", "locationName": "multiplexProgramSettings" }, "PacketIdentifiersMap": { - "shape": "Sh3", + "shape": "Sh9", "locationName": "packetIdentifiersMap" }, "PipelineDetails": { - "shape": "Sh5", + "shape": "Shb", "locationName": "pipelineDetails" }, "ProgramName": { @@ -899,14 +899,14 @@ "locationName": "region" }, "RenewalSettings": { - "shape": "Sho", + "shape": "Shu", "locationName": "renewalSettings" }, "ReservationId": { "locationName": "reservationId" }, "ResourceSpecification": { - "shape": "Shq", + "shape": "Shw", "locationName": "resourceSpecification" }, "Start": { @@ -916,7 +916,7 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "UsagePrice": { @@ -988,7 +988,7 @@ "type": "structure", "members": { "AccountConfiguration": { - "shape": "Si4", + "shape": "Sia", "locationName": "accountConfiguration" } } @@ -1030,7 +1030,7 @@ "locationName": "destinations" }, "EgressEndpoints": { - "shape": "Sfg", + "shape": "Sfm", "locationName": "egressEndpoints" }, "EncoderSettings": { @@ -1041,25 +1041,25 @@ "locationName": "id" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", "locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { "locationName": "logLevel" }, "Maintenance": { - "shape": "Sfi", + "shape": "Sfo", "locationName": "maintenance" }, "Name": { "locationName": "name" }, "PipelineDetails": { - "shape": "Sfj", + "shape": "Sfp", "locationName": "pipelineDetails" }, 
"PipelinesRunningCount": { @@ -1073,11 +1073,11 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Vpc": { - "shape": "Sfm", + "shape": "Sfs", "locationName": "vpc" } } @@ -1112,7 +1112,7 @@ "locationName": "attachedChannels" }, "Destinations": { - "shape": "Sg0", + "shape": "Sg6", "locationName": "destinations" }, "Id": { @@ -1122,7 +1122,7 @@ "locationName": "inputClass" }, "InputDevices": { - "shape": "Sfq", + "shape": "Sfw", "locationName": "inputDevices" }, "InputPartnerIds": { @@ -1133,7 +1133,7 @@ "locationName": "inputSourceType" }, "MediaConnectFlows": { - "shape": "Sg5", + "shape": "Sgb", "locationName": "mediaConnectFlows" }, "Name": { @@ -1147,14 +1147,14 @@ "locationName": "securityGroups" }, "Sources": { - "shape": "Sg7", + "shape": "Sgd", "locationName": "sources" }, "State": { "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Type": { @@ -1197,7 +1197,7 @@ "locationName": "deviceUpdateStatus" }, "HdDeviceSettings": { - "shape": "Sie", + "shape": "Sik", "locationName": "hdDeviceSettings" }, "Id": { @@ -1210,7 +1210,7 @@ "locationName": "name" }, "NetworkSettings": { - "shape": "Sij", + "shape": "Sip", "locationName": "networkSettings" }, "SerialNumber": { @@ -1220,11 +1220,11 @@ "locationName": "type" }, "UhdDeviceSettings": { - "shape": "Sim", + "shape": "Sis", "locationName": "uhdDeviceSettings" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "AvailabilityZone": { @@ -1328,11 +1328,11 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "WhitelistRules": { - "shape": "Sgg", + "shape": "Sgm", "locationName": "whitelistRules" } } @@ -1367,14 +1367,14 @@ "locationName": "availabilityZones" }, "Destinations": { - "shape": "Sgp", + "shape": "Sgv", "locationName": "destinations" }, "Id": { "locationName": "id" }, "MultiplexSettings": { - "shape": "Sgj", + "shape": "Sgp", 
"locationName": "multiplexSettings" }, "Name": { @@ -1392,7 +1392,7 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } } @@ -1428,15 +1428,15 @@ "locationName": "channelId" }, "MultiplexProgramSettings": { - "shape": "Sgu", + "shape": "Sh0", "locationName": "multiplexProgramSettings" }, "PacketIdentifiersMap": { - "shape": "Sh3", + "shape": "Sh9", "locationName": "packetIdentifiersMap" }, "PipelineDetails": { - "shape": "Sh5", + "shape": "Shb", "locationName": "pipelineDetails" }, "ProgramName": { @@ -1496,7 +1496,7 @@ "locationName": "region" }, "ResourceSpecification": { - "shape": "Shq", + "shape": "Shw", "locationName": "resourceSpecification" }, "UsagePrice": { @@ -1567,14 +1567,14 @@ "locationName": "region" }, "RenewalSettings": { - "shape": "Sho", + "shape": "Shu", "locationName": "renewalSettings" }, "ReservationId": { "locationName": "reservationId" }, "ResourceSpecification": { - "shape": "Shq", + "shape": "Shw", "locationName": "resourceSpecification" }, "Start": { @@ -1584,7 +1584,7 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "UsagePrice": { @@ -1747,25 +1747,25 @@ "locationName": "destinations" }, "EgressEndpoints": { - "shape": "Sfg", + "shape": "Sfm", "locationName": "egressEndpoints" }, "Id": { "locationName": "id" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", "locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { "locationName": "logLevel" }, "Maintenance": { - "shape": "Sfi", + "shape": "Sfo", "locationName": "maintenance" }, "Name": { @@ -1782,11 +1782,11 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Vpc": { - "shape": "Sfm", + "shape": "Sfs", "locationName": "vpc" } } @@ -1897,7 +1897,7 @@ "locationName": "deviceUpdateStatus" }, "HdDeviceSettings": { - "shape": "Sie", + 
"shape": "Sik", "locationName": "hdDeviceSettings" }, "Id": { @@ -1910,7 +1910,7 @@ "locationName": "name" }, "NetworkSettings": { - "shape": "Sij", + "shape": "Sip", "locationName": "networkSettings" }, "SerialNumber": { @@ -1920,11 +1920,11 @@ "locationName": "type" }, "UhdDeviceSettings": { - "shape": "Sim", + "shape": "Sis", "locationName": "uhdDeviceSettings" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "AvailabilityZone": { @@ -1973,7 +1973,7 @@ "locationName": "inputSecurityGroups", "type": "list", "member": { - "shape": "Sge" + "shape": "Sgk" } }, "NextToken": { @@ -2009,7 +2009,7 @@ "locationName": "inputs", "type": "list", "member": { - "shape": "Sfz" + "shape": "Sg5" } }, "NextToken": { @@ -2133,7 +2133,7 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } } @@ -2247,7 +2247,7 @@ "locationName": "region" }, "ResourceSpecification": { - "shape": "Shq", + "shape": "Shw", "locationName": "resourceSpecification" }, "UsagePrice": { @@ -2322,7 +2322,7 @@ "locationName": "reservations", "type": "list", "member": { - "shape": "Skk" + "shape": "Skq" } } } @@ -2350,7 +2350,7 @@ "type": "structure", "members": { "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } } @@ -2376,7 +2376,7 @@ "locationName": "offeringId" }, "RenewalSettings": { - "shape": "Sho", + "shape": "Shu", "locationName": "renewalSettings" }, "RequestId": { @@ -2387,7 +2387,7 @@ "locationName": "start" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } }, @@ -2400,7 +2400,7 @@ "type": "structure", "members": { "Reservation": { - "shape": "Skk", + "shape": "Skq", "locationName": "reservation" } } @@ -2488,7 +2488,7 @@ "locationName": "destinations" }, "EgressEndpoints": { - "shape": "Sfg", + "shape": "Sfm", "locationName": "egressEndpoints" }, "EncoderSettings": { @@ -2499,25 +2499,25 @@ "locationName": "id" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", 
"locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { "locationName": "logLevel" }, "Maintenance": { - "shape": "Sfi", + "shape": "Sfo", "locationName": "maintenance" }, "Name": { "locationName": "name" }, "PipelineDetails": { - "shape": "Sfj", + "shape": "Sfp", "locationName": "pipelineDetails" }, "PipelinesRunningCount": { @@ -2531,11 +2531,11 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Vpc": { - "shape": "Sfm", + "shape": "Sfs", "locationName": "vpc" } } @@ -2613,14 +2613,14 @@ "locationName": "availabilityZones" }, "Destinations": { - "shape": "Sgp", + "shape": "Sgv", "locationName": "destinations" }, "Id": { "locationName": "id" }, "MultiplexSettings": { - "shape": "Sgj", + "shape": "Sgp", "locationName": "multiplexSettings" }, "Name": { @@ -2638,7 +2638,7 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } } @@ -2679,7 +2679,7 @@ "locationName": "destinations" }, "EgressEndpoints": { - "shape": "Sfg", + "shape": "Sfm", "locationName": "egressEndpoints" }, "EncoderSettings": { @@ -2690,25 +2690,25 @@ "locationName": "id" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", "locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { "locationName": "logLevel" }, "Maintenance": { - "shape": "Sfi", + "shape": "Sfo", "locationName": "maintenance" }, "Name": { "locationName": "name" }, "PipelineDetails": { - "shape": "Sfj", + "shape": "Sfp", "locationName": "pipelineDetails" }, "PipelinesRunningCount": { @@ -2722,11 +2722,11 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Vpc": { - "shape": "Sfm", + "shape": "Sfs", "locationName": "vpc" } } @@ -2782,14 +2782,14 @@ "locationName": "availabilityZones" }, 
"Destinations": { - "shape": "Sgp", + "shape": "Sgv", "locationName": "destinations" }, "Id": { "locationName": "id" }, "MultiplexSettings": { - "shape": "Sgj", + "shape": "Sgp", "locationName": "multiplexSettings" }, "Name": { @@ -2807,7 +2807,7 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } } @@ -2854,7 +2854,7 @@ "type": "structure", "members": { "AccountConfiguration": { - "shape": "Si4", + "shape": "Sia", "locationName": "accountConfiguration" } } @@ -2863,7 +2863,7 @@ "type": "structure", "members": { "AccountConfiguration": { - "shape": "Si4", + "shape": "Sia", "locationName": "accountConfiguration" } } @@ -2895,11 +2895,11 @@ "locationName": "encoderSettings" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", "locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { @@ -2935,7 +2935,7 @@ "type": "structure", "members": { "Channel": { - "shape": "Sff", + "shape": "Sfl", "locationName": "channel" } } @@ -2971,7 +2971,7 @@ "type": "structure", "members": { "Channel": { - "shape": "Sff", + "shape": "Sfl", "locationName": "channel" } } @@ -2987,7 +2987,7 @@ "type": "structure", "members": { "Destinations": { - "shape": "Sfo", + "shape": "Sfu", "locationName": "destinations" }, "InputDevices": { @@ -3011,7 +3011,7 @@ "locationName": "inputSecurityGroups" }, "MediaConnectFlows": { - "shape": "Sfs", + "shape": "Sfy", "locationName": "mediaConnectFlows" }, "Name": { @@ -3021,7 +3021,7 @@ "locationName": "roleArn" }, "Sources": { - "shape": "Sfu", + "shape": "Sg0", "locationName": "sources" } }, @@ -3033,7 +3033,7 @@ "type": "structure", "members": { "Input": { - "shape": "Sfz", + "shape": "Sg5", "locationName": "input" } } @@ -3049,7 +3049,7 @@ "type": "structure", "members": { "HdDeviceSettings": { - "shape": "Slm", + "shape": "Sls", "locationName": "hdDeviceSettings" }, "InputDeviceId": { @@ -3060,7 +3060,7 
@@ "locationName": "name" }, "UhdDeviceSettings": { - "shape": "Slm", + "shape": "Sls", "locationName": "uhdDeviceSettings" }, "AvailabilityZone": { @@ -3087,7 +3087,7 @@ "locationName": "deviceUpdateStatus" }, "HdDeviceSettings": { - "shape": "Sie", + "shape": "Sik", "locationName": "hdDeviceSettings" }, "Id": { @@ -3100,7 +3100,7 @@ "locationName": "name" }, "NetworkSettings": { - "shape": "Sij", + "shape": "Sip", "locationName": "networkSettings" }, "SerialNumber": { @@ -3110,11 +3110,11 @@ "locationName": "type" }, "UhdDeviceSettings": { - "shape": "Sim", + "shape": "Sis", "locationName": "uhdDeviceSettings" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "AvailabilityZone": { @@ -3144,11 +3144,11 @@ "locationName": "inputSecurityGroupId" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "WhitelistRules": { - "shape": "Sgb", + "shape": "Sgh", "locationName": "whitelistRules" } }, @@ -3160,7 +3160,7 @@ "type": "structure", "members": { "SecurityGroup": { - "shape": "Sge", + "shape": "Sgk", "locationName": "securityGroup" } } @@ -3180,7 +3180,7 @@ "locationName": "multiplexId" }, "MultiplexSettings": { - "shape": "Sgj", + "shape": "Sgp", "locationName": "multiplexSettings" }, "Name": { @@ -3195,7 +3195,7 @@ "type": "structure", "members": { "Multiplex": { - "shape": "Sgo", + "shape": "Sgu", "locationName": "multiplex" } } @@ -3215,7 +3215,7 @@ "locationName": "multiplexId" }, "MultiplexProgramSettings": { - "shape": "Sgu", + "shape": "Sh0", "locationName": "multiplexProgramSettings" }, "ProgramName": { @@ -3232,7 +3232,7 @@ "type": "structure", "members": { "MultiplexProgram": { - "shape": "Sh2", + "shape": "Sh8", "locationName": "multiplexProgram" } } @@ -3251,7 +3251,7 @@ "locationName": "name" }, "RenewalSettings": { - "shape": "Sho", + "shape": "Shu", "locationName": "renewalSettings" }, "ReservationId": { @@ -3267,7 +3267,7 @@ "type": "structure", "members": { "Reservation": { - "shape": "Skk", + "shape": 
"Skq", "locationName": "reservation" } } @@ -3313,7 +3313,7 @@ "locationName": "destinations" }, "EgressEndpoints": { - "shape": "Sfg", + "shape": "Sfm", "locationName": "egressEndpoints" }, "EncoderSettings": { @@ -3324,18 +3324,18 @@ "locationName": "id" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", "locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { "locationName": "logLevel" }, "Maintenance": { - "shape": "Sfi", + "shape": "Sfo", "locationName": "maintenance" }, "MaintenanceStatus": { @@ -3345,7 +3345,7 @@ "locationName": "name" }, "PipelineDetails": { - "shape": "Sfj", + "shape": "Sfp", "locationName": "pipelineDetails" }, "PipelinesRunningCount": { @@ -3359,11 +3359,11 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Vpc": { - "shape": "Sfm", + "shape": "Sfs", "locationName": "vpc" } } @@ -6016,6 +6016,26 @@ "TimecodeBurninSettings": { "shape": "Sb6", "locationName": "timecodeBurninSettings" + }, + "MvOverPictureBoundaries": { + "locationName": "mvOverPictureBoundaries" + }, + "MvTemporalPredictor": { + "locationName": "mvTemporalPredictor" + }, + "TileHeight": { + "locationName": "tileHeight", + "type": "integer" + }, + "TilePadding": { + "locationName": "tilePadding" + }, + "TileWidth": { + "locationName": "tileWidth", + "type": "integer" + }, + "TreeblockSize": { + "locationName": "treeblockSize" } }, "required": [ @@ -6442,7 +6462,7 @@ } } }, - "Sdf": { + "Sdl": { "type": "list", "member": { "type": "structure", @@ -6878,7 +6898,7 @@ } } }, - "Sf4": { + "Sfa": { "type": "structure", "members": { "Codec": { @@ -6892,12 +6912,12 @@ } } }, - "Sfc": { + "Sfi": { "type": "map", "key": {}, "value": {} }, - "Sff": { + "Sfl": { "type": "structure", "members": { "Arn": { @@ -6915,7 +6935,7 @@ "locationName": "destinations" }, "EgressEndpoints": { - "shape": "Sfg", + "shape": "Sfm", "locationName": 
"egressEndpoints" }, "EncoderSettings": { @@ -6926,25 +6946,25 @@ "locationName": "id" }, "InputAttachments": { - "shape": "Sdf", + "shape": "Sdl", "locationName": "inputAttachments" }, "InputSpecification": { - "shape": "Sf4", + "shape": "Sfa", "locationName": "inputSpecification" }, "LogLevel": { "locationName": "logLevel" }, "Maintenance": { - "shape": "Sfi", + "shape": "Sfo", "locationName": "maintenance" }, "Name": { "locationName": "name" }, "PipelineDetails": { - "shape": "Sfj", + "shape": "Sfp", "locationName": "pipelineDetails" }, "PipelinesRunningCount": { @@ -6958,16 +6978,16 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Vpc": { - "shape": "Sfm", + "shape": "Sfs", "locationName": "vpc" } } }, - "Sfg": { + "Sfm": { "type": "list", "member": { "type": "structure", @@ -6978,7 +6998,7 @@ } } }, - "Sfi": { + "Sfo": { "type": "structure", "members": { "MaintenanceDay": { @@ -6995,7 +7015,7 @@ } } }, - "Sfj": { + "Sfp": { "type": "list", "member": { "type": "structure", @@ -7018,7 +7038,7 @@ } } }, - "Sfm": { + "Sfs": { "type": "structure", "members": { "AvailabilityZones": { @@ -7039,7 +7059,7 @@ } } }, - "Sfo": { + "Sfu": { "type": "list", "member": { "type": "structure", @@ -7050,7 +7070,7 @@ } } }, - "Sfq": { + "Sfw": { "type": "list", "member": { "type": "structure", @@ -7061,7 +7081,7 @@ } } }, - "Sfs": { + "Sfy": { "type": "list", "member": { "type": "structure", @@ -7072,7 +7092,7 @@ } } }, - "Sfu": { + "Sg0": { "type": "list", "member": { "type": "structure", @@ -7089,7 +7109,7 @@ } } }, - "Sfz": { + "Sg5": { "type": "structure", "members": { "Arn": { @@ -7100,7 +7120,7 @@ "locationName": "attachedChannels" }, "Destinations": { - "shape": "Sg0", + "shape": "Sg6", "locationName": "destinations" }, "Id": { @@ -7110,7 +7130,7 @@ "locationName": "inputClass" }, "InputDevices": { - "shape": "Sfq", + "shape": "Sfw", "locationName": "inputDevices" }, "InputPartnerIds": { @@ -7121,7 +7141,7 @@ 
"locationName": "inputSourceType" }, "MediaConnectFlows": { - "shape": "Sg5", + "shape": "Sgb", "locationName": "mediaConnectFlows" }, "Name": { @@ -7135,14 +7155,14 @@ "locationName": "securityGroups" }, "Sources": { - "shape": "Sg7", + "shape": "Sgd", "locationName": "sources" }, "State": { "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "Type": { @@ -7150,7 +7170,7 @@ } } }, - "Sg0": { + "Sg6": { "type": "list", "member": { "type": "structure", @@ -7179,7 +7199,7 @@ } } }, - "Sg5": { + "Sgb": { "type": "list", "member": { "type": "structure", @@ -7190,7 +7210,7 @@ } } }, - "Sg7": { + "Sgd": { "type": "list", "member": { "type": "structure", @@ -7207,7 +7227,7 @@ } } }, - "Sgb": { + "Sgh": { "type": "list", "member": { "type": "structure", @@ -7218,7 +7238,7 @@ } } }, - "Sge": { + "Sgk": { "type": "structure", "members": { "Arn": { @@ -7235,16 +7255,16 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "WhitelistRules": { - "shape": "Sgg", + "shape": "Sgm", "locationName": "whitelistRules" } } }, - "Sgg": { + "Sgm": { "type": "list", "member": { "type": "structure", @@ -7255,7 +7275,7 @@ } } }, - "Sgj": { + "Sgp": { "type": "structure", "members": { "MaximumVideoBufferDelayMilliseconds": { @@ -7280,7 +7300,7 @@ "TransportStreamId" ] }, - "Sgo": { + "Sgu": { "type": "structure", "members": { "Arn": { @@ -7291,14 +7311,14 @@ "locationName": "availabilityZones" }, "Destinations": { - "shape": "Sgp", + "shape": "Sgv", "locationName": "destinations" }, "Id": { "locationName": "id" }, "MultiplexSettings": { - "shape": "Sgj", + "shape": "Sgp", "locationName": "multiplexSettings" }, "Name": { @@ -7316,12 +7336,12 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" } } }, - "Sgp": { + "Sgv": { "type": "list", "member": { "type": "structure", @@ -7338,7 +7358,7 @@ } } }, - "Sgu": { + "Sh0": { "type": "structure", "members": { 
"PreferredChannelPipeline": { @@ -7397,22 +7417,22 @@ "ProgramNumber" ] }, - "Sh2": { + "Sh8": { "type": "structure", "members": { "ChannelId": { "locationName": "channelId" }, "MultiplexProgramSettings": { - "shape": "Sgu", + "shape": "Sh0", "locationName": "multiplexProgramSettings" }, "PacketIdentifiersMap": { - "shape": "Sh3", + "shape": "Sh9", "locationName": "packetIdentifiersMap" }, "PipelineDetails": { - "shape": "Sh5", + "shape": "Shb", "locationName": "pipelineDetails" }, "ProgramName": { @@ -7420,15 +7440,15 @@ } } }, - "Sh3": { + "Sh9": { "type": "structure", "members": { "AudioPids": { - "shape": "Sh4", + "shape": "Sha", "locationName": "audioPids" }, "DvbSubPids": { - "shape": "Sh4", + "shape": "Sha", "locationName": "dvbSubPids" }, "DvbTeletextPid": { @@ -7444,7 +7464,7 @@ "type": "integer" }, "KlvDataPids": { - "shape": "Sh4", + "shape": "Sha", "locationName": "klvDataPids" }, "PcrPid": { @@ -7460,7 +7480,7 @@ "type": "integer" }, "Scte27Pids": { - "shape": "Sh4", + "shape": "Sha", "locationName": "scte27Pids" }, "Scte35Pid": { @@ -7477,13 +7497,13 @@ } } }, - "Sh4": { + "Sha": { "type": "list", "member": { "type": "integer" } }, - "Sh5": { + "Shb": { "type": "list", "member": { "type": "structure", @@ -7497,7 +7517,7 @@ } } }, - "Sho": { + "Shu": { "type": "structure", "members": { "AutomaticRenewal": { @@ -7509,7 +7529,7 @@ } } }, - "Shq": { + "Shw": { "type": "structure", "members": { "ChannelClass": { @@ -7538,7 +7558,7 @@ } } }, - "Si4": { + "Sia": { "type": "structure", "members": { "KmsKeyId": { @@ -7546,7 +7566,7 @@ } } }, - "Sie": { + "Sik": { "type": "structure", "members": { "ActiveInput": { @@ -7583,7 +7603,7 @@ } } }, - "Sij": { + "Sip": { "type": "structure", "members": { "DnsAddresses": { @@ -7604,7 +7624,7 @@ } } }, - "Sim": { + "Sis": { "type": "structure", "members": { "ActiveInput": { @@ -7678,7 +7698,7 @@ } } }, - "Skk": { + "Skq": { "type": "structure", "members": { "Arn": { @@ -7721,14 +7741,14 @@ "locationName": "region" }, 
"RenewalSettings": { - "shape": "Sho", + "shape": "Shu", "locationName": "renewalSettings" }, "ReservationId": { "locationName": "reservationId" }, "ResourceSpecification": { - "shape": "Shq", + "shape": "Shw", "locationName": "resourceSpecification" }, "Start": { @@ -7738,7 +7758,7 @@ "locationName": "state" }, "Tags": { - "shape": "Sfc", + "shape": "Sfi", "locationName": "tags" }, "UsagePrice": { @@ -7747,7 +7767,7 @@ } } }, - "Slm": { + "Sls": { "type": "structure", "members": { "ConfiguredInput": { diff --git a/apis/medialive-2017-10-14.normal.json b/apis/medialive-2017-10-14.normal.json index de73f56c7c..3b99dcaf8f 100644 --- a/apis/medialive-2017-10-14.normal.json +++ b/apis/medialive-2017-10-14.normal.json @@ -8411,6 +8411,36 @@ "shape": "TimecodeBurninSettings", "locationName": "timecodeBurninSettings", "documentation": "Timecode burn-in settings" + }, + "MvOverPictureBoundaries": { + "shape": "H265MvOverPictureBoundaries", + "locationName": "mvOverPictureBoundaries", + "documentation": "If you are setting up the picture as a tile, you must set this to \"disabled\". In all other configurations, you typically enter \"enabled\"." + }, + "MvTemporalPredictor": { + "shape": "H265MvTemporalPredictor", + "locationName": "mvTemporalPredictor", + "documentation": "If you are setting up the picture as a tile, you must set this to \"disabled\". In other configurations, you typically enter \"enabled\"." + }, + "TileHeight": { + "shape": "__integerMin64Max2160", + "locationName": "tileHeight", + "documentation": "Set this field to set up the picture as a tile. You must also set tileWidth.\nThe tile height must result in 22 or fewer rows in the frame. The tile width\nmust result in 20 or fewer columns in the frame. 
And finally, the product of the\ncolumn count and row count must be 64 of less.\nIf the tile width and height are specified, MediaLive will override the video\ncodec slices field with a value that MediaLive calculates" + }, + "TilePadding": { + "shape": "H265TilePadding", + "locationName": "tilePadding", + "documentation": "Set to \"padded\" to force MediaLive to add padding to the frame, to obtain a frame that is a whole multiple of the tile size.\nIf you are setting up the picture as a tile, you must enter \"padded\".\nIn all other configurations, you typically enter \"none\"." + }, + "TileWidth": { + "shape": "__integerMin256Max3840", + "locationName": "tileWidth", + "documentation": "Set this field to set up the picture as a tile. See tileHeight for more information." + }, + "TreeblockSize": { + "shape": "H265TreeblockSize", + "locationName": "treeblockSize", + "documentation": "Select the tree block size used for encoding. If you enter \"auto\", the encoder will pick the best size. If you are setting up the picture as a tile, you must set this to 32x32. In all other configurations, you typically enter \"auto\"." 
} }, "documentation": "H265 Settings", @@ -16716,6 +16746,50 @@ "shape": "ChannelPipelineIdToRestart" }, "documentation": "Placeholder documentation for __listOfChannelPipelineIdToRestart" + }, + "H265MvOverPictureBoundaries": { + "type": "string", + "documentation": "H265 Mv Over Picture Boundaries", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "H265MvTemporalPredictor": { + "type": "string", + "documentation": "H265 Mv Temporal Predictor", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, + "H265TilePadding": { + "type": "string", + "documentation": "H265 Tile Padding", + "enum": [ + "NONE", + "PADDED" + ] + }, + "H265TreeblockSize": { + "type": "string", + "documentation": "H265 Treeblock Size", + "enum": [ + "AUTO", + "TREE_SIZE_32X32" + ] + }, + "__integerMin256Max3840": { + "type": "integer", + "min": 256, + "max": 3840, + "documentation": "Placeholder documentation for __integerMin256Max3840" + }, + "__integerMin64Max2160": { + "type": "integer", + "min": 64, + "max": 2160, + "documentation": "Placeholder documentation for __integerMin64Max2160" } }, "documentation": "API for AWS Elemental MediaLive" diff --git a/apis/sagemaker-2017-07-24.normal.json b/apis/sagemaker-2017-07-24.normal.json index 5faee454ca..3eddc304ac 100644 --- a/apis/sagemaker-2017-07-24.normal.json +++ b/apis/sagemaker-2017-07-24.normal.json @@ -6403,7 +6403,89 @@ "ml.p4de.24xlarge", "ml.trn1.2xlarge", "ml.trn1.32xlarge", - "ml.trn1n.32xlarge" + "ml.trn1n.32xlarge", + "ml.p5.48xlarge", + "ml.m6i.large", + "ml.m6i.xlarge", + "ml.m6i.2xlarge", + "ml.m6i.4xlarge", + "ml.m6i.8xlarge", + "ml.m6i.12xlarge", + "ml.m6i.16xlarge", + "ml.m6i.24xlarge", + "ml.m6i.32xlarge", + "ml.m7i.large", + "ml.m7i.xlarge", + "ml.m7i.2xlarge", + "ml.m7i.4xlarge", + "ml.m7i.8xlarge", + "ml.m7i.12xlarge", + "ml.m7i.16xlarge", + "ml.m7i.24xlarge", + "ml.m7i.48xlarge", + "ml.c6i.large", + "ml.c6i.xlarge", + "ml.c6i.2xlarge", + "ml.c6i.4xlarge", + "ml.c6i.8xlarge", + "ml.c6i.12xlarge", + "ml.c6i.16xlarge", + 
"ml.c6i.24xlarge", + "ml.c6i.32xlarge", + "ml.c7i.large", + "ml.c7i.xlarge", + "ml.c7i.2xlarge", + "ml.c7i.4xlarge", + "ml.c7i.8xlarge", + "ml.c7i.12xlarge", + "ml.c7i.16xlarge", + "ml.c7i.24xlarge", + "ml.c7i.48xlarge", + "ml.r6i.large", + "ml.r6i.xlarge", + "ml.r6i.2xlarge", + "ml.r6i.4xlarge", + "ml.r6i.8xlarge", + "ml.r6i.12xlarge", + "ml.r6i.16xlarge", + "ml.r6i.24xlarge", + "ml.r6i.32xlarge", + "ml.r7i.large", + "ml.r7i.xlarge", + "ml.r7i.2xlarge", + "ml.r7i.4xlarge", + "ml.r7i.8xlarge", + "ml.r7i.12xlarge", + "ml.r7i.16xlarge", + "ml.r7i.24xlarge", + "ml.r7i.48xlarge", + "ml.m6id.large", + "ml.m6id.xlarge", + "ml.m6id.2xlarge", + "ml.m6id.4xlarge", + "ml.m6id.8xlarge", + "ml.m6id.12xlarge", + "ml.m6id.16xlarge", + "ml.m6id.24xlarge", + "ml.m6id.32xlarge", + "ml.c6id.large", + "ml.c6id.xlarge", + "ml.c6id.2xlarge", + "ml.c6id.4xlarge", + "ml.c6id.8xlarge", + "ml.c6id.12xlarge", + "ml.c6id.16xlarge", + "ml.c6id.24xlarge", + "ml.c6id.32xlarge", + "ml.r6id.large", + "ml.r6id.xlarge", + "ml.r6id.2xlarge", + "ml.r6id.4xlarge", + "ml.r6id.8xlarge", + "ml.r6id.12xlarge", + "ml.r6id.16xlarge", + "ml.r6id.24xlarge", + "ml.r6id.32xlarge" ] }, "AppList": { diff --git a/clients/codebuild.d.ts b/clients/codebuild.d.ts index 88744750e8..02dd09e2a2 100644 --- a/clients/codebuild.d.ts +++ b/clients/codebuild.d.ts @@ -416,7 +416,7 @@ declare namespace CodeBuild { export type ArtifactNamespace = "NONE"|"BUILD_ID"|string; export type ArtifactPackaging = "NONE"|"ZIP"|string; export type ArtifactsType = "CODEPIPELINE"|"S3"|"NO_ARTIFACTS"|string; - export type AuthType = "OAUTH"|"BASIC_AUTH"|"PERSONAL_ACCESS_TOKEN"|string; + export type AuthType = "OAUTH"|"BASIC_AUTH"|"PERSONAL_ACCESS_TOKEN"|"CODECONNECTIONS"|string; export interface BatchDeleteBuildsInput { /** * The IDs of the builds to delete. 
@@ -1493,7 +1493,7 @@ declare namespace CodeBuild { export type FleetSortByType = "NAME"|"CREATED_TIME"|"LAST_MODIFIED_TIME"|string; export interface FleetStatus { /** - * The status code of the compute fleet. Valid values include: CREATING: The compute fleet is being created. UPDATING: The compute fleet is being updated. ROTATING: The compute fleet is being rotated. DELETING: The compute fleet is being deleted. CREATE_FAILED: The compute fleet has failed to create. UPDATE_ROLLBACK_FAILED: The compute fleet has failed to update and could not rollback to previous state. ACTIVE: The compute fleet has succeeded and is active. + * The status code of the compute fleet. Valid values include: CREATING: The compute fleet is being created. UPDATING: The compute fleet is being updated. ROTATING: The compute fleet is being rotated. PENDING_DELETION: The compute fleet is pending deletion. DELETING: The compute fleet is being deleted. CREATE_FAILED: The compute fleet has failed to create. UPDATE_ROLLBACK_FAILED: The compute fleet has failed to update and could not rollback to previous state. ACTIVE: The compute fleet has succeeded and is active. */ statusCode?: FleetStatusCode; /** @@ -2251,11 +2251,11 @@ declare namespace CodeBuild { export type ProjectSortByType = "NAME"|"CREATED_TIME"|"LAST_MODIFIED_TIME"|string; export interface ProjectSource { /** - * The type of repository that contains the source code to be built. Valid values include: BITBUCKET: The source code is in a Bitbucket repository. CODECOMMIT: The source code is in an CodeCommit repository. CODEPIPELINE: The source code settings are specified in the source action of a pipeline in CodePipeline. GITHUB: The source code is in a GitHub or GitHub Enterprise Cloud repository. GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise Server repository. NO_SOURCE: The project does not have input source code. S3: The source code is in an Amazon S3 bucket. 
+ * The type of repository that contains the source code to be built. Valid values include: BITBUCKET: The source code is in a Bitbucket repository. CODECOMMIT: The source code is in an CodeCommit repository. CODEPIPELINE: The source code settings are specified in the source action of a pipeline in CodePipeline. GITHUB: The source code is in a GitHub repository. GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise Server repository. GITLAB: The source code is in a GitLab repository. GITLAB_SELF_MANAGED: The source code is in a self-managed GitLab repository. NO_SOURCE: The project does not have input source code. S3: The source code is in an Amazon S3 bucket. */ type: SourceType; /** - * Information about the location of the source code to be built. Valid values include: For source code settings that are specified in the source action of a pipeline in CodePipeline, location should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value. For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>). For source code in an Amazon S3 input bucket, one of the following. The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip). The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/). For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitHub account. Use the CodeBuild console to start creating a build project. 
When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH. For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH. If you specify CODEPIPELINE for the Type property, don't specify this property. For all of the other types, you must specify Location. + * Information about the location of the source code to be built. Valid values include: For source code settings that are specified in the source action of a pipeline in CodePipeline, location should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value. For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, https://git-codecommit.<region-ID>.amazonaws.com/v1/repos/<repo-name>). For source code in an Amazon S3 input bucket, one of the following. 
The path to the ZIP file that contains the source code (for example, <bucket-name>/<path>/<object-name>.zip). The path to the folder that contains the source code (for example, <bucket-name>/<path-to-source-code>/<folder>/). For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitHub account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub Authorize application page, for Organization access, choose Request access next to each repository you want to allow CodeBuild to have access to, and then choose Authorize application. (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH. For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your Amazon Web Services account to your GitLab account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections Authorize application page, choose Authorize. Then on the CodeStar Connections Create GitLab connection page, choose Connect to GitLab. (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to override the default connection and use this connection instead, set the auth object's type value to CODECONNECTIONS in the source object. For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. 
You must connect your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your account page, choose Grant access. (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the CodeBuild console.) To instruct CodeBuild to use this connection, in the source object, set the auth object's type value to OAUTH. If you specify CODEPIPELINE for the Type property, don't specify this property. For all of the other types, you must specify Location. */ location?: String; /** @@ -2275,7 +2275,7 @@ declare namespace CodeBuild { */ auth?: SourceAuth; /** - * Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown. To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide. The status of a build triggered by a webhook is always reported to your source provider. If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect. + * Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown. 
To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide. The status of a build triggered by a webhook is always reported to your source provider. If your project's builds are triggered by a webhook, you must push a new commit to the repo for a change to this property to take effect. */ reportBuildStatus?: WrapperBoolean; /** @@ -2297,7 +2297,7 @@ declare namespace CodeBuild { */ sourceIdentifier: String; /** - * The source version for the corresponding source identifier. If specified, must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. + * The source version for the corresponding source identifier. If specified, must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. 
If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. */ sourceVersion: String; } @@ -2591,12 +2591,12 @@ declare namespace CodeBuild { export type SecurityGroupIds = NonEmptyString[]; export type SensitiveNonEmptyString = string; export type SensitiveString = string; - export type ServerType = "GITHUB"|"BITBUCKET"|"GITHUB_ENTERPRISE"|string; + export type ServerType = "GITHUB"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"GITLAB"|"GITLAB_SELF_MANAGED"|string; export type SharedResourceSortByType = "ARN"|"MODIFIED_TIME"|string; export type SortOrderType = "ASCENDING"|"DESCENDING"|string; export interface SourceAuth { /** - * This data type is deprecated and is no longer accurate or used. The authorization type to use. The only valid value is OAUTH, which represents the OAuth authorization type. + * The authorization type to use. Valid options are OAUTH or CODECONNECTIONS. */ type: SourceAuthType; /** @@ -2604,23 +2604,27 @@ declare namespace CodeBuild { */ resource?: String; } - export type SourceAuthType = "OAUTH"|string; + export type SourceAuthType = "OAUTH"|"CODECONNECTIONS"|string; export interface SourceCredentialsInfo { /** * The Amazon Resource Name (ARN) of the token. */ arn?: NonEmptyString; /** - * The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, or BITBUCKET. + * The type of source provider. 
The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET. */ serverType?: ServerType; /** - * The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, or PERSONAL_ACCESS_TOKEN. + * The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS. */ authType?: AuthType; + /** + * The connection ARN if your serverType type is GITLAB or GITLAB_SELF_MANAGED and your authType is CODECONNECTIONS. + */ + resource?: String; } export type SourceCredentialsInfos = SourceCredentialsInfo[]; - export type SourceType = "CODECOMMIT"|"CODEPIPELINE"|"GITHUB"|"S3"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"NO_SOURCE"|string; + export type SourceType = "CODECOMMIT"|"CODEPIPELINE"|"GITHUB"|"GITLAB"|"GITLAB_SELF_MANAGED"|"S3"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"NO_SOURCE"|string; export interface StartBuildBatchInput { /** * The name of the project. diff --git a/clients/ec2.d.ts b/clients/ec2.d.ts index d95a0d2e29..ae76257669 100644 --- a/clients/ec2.d.ts +++ b/clients/ec2.d.ts @@ -3532,6 +3532,14 @@ declare class EC2 extends Service { * Gets the current state of block public access for AMIs at the account level in the specified Amazon Web Services Region. For more information, see Block public access to your AMIs in the Amazon EC2 User Guide. 
*/ getImageBlockPublicAccessState(callback?: (err: AWSError, data: EC2.Types.GetImageBlockPublicAccessStateResult) => void): Request>1,l=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,d=a?0:s-1,y=a?1:-1,b=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(n=isNaN(t)?1:0,o=m):(o=Math.floor(Math.log(t)/Math.LN2),t*(u=Math.pow(2,-o))<1&&(o--,u*=2),t+=o+c>=1?l/u:l*Math.pow(2,1-c),t*u>=2&&(o++,u/=2),o+c>=m?(n=0,o=m):o+c>=1?(n=(t*u-1)*Math.pow(2,i),o+=c):(n=t*Math.pow(2,c-1)*Math.pow(2,i),o=0));i>=8;e[r+d]=255&n,d+=y,n/=256,i-=8);for(o=o<0;e[r+d]=255&o,d+=y,o/=256,p-=8);e[r+d-y]|=128*b}},{}],443:[function(e,t,r){var a={}.toString;t.exports=Array.isArray||function(e){return"[object Array]"==a.call(e)}},{}],444:[function(e,t,r){!function(e){"use strict";function t(e){return null!==e&&"[object Array]"===Object.prototype.toString.call(e)}function r(e){return null!==e&&"[object Object]"===Object.prototype.toString.call(e)}function a(e,i){if(e===i)return!0;if(Object.prototype.toString.call(e)!==Object.prototype.toString.call(i))return!1;if(!0===t(e)){if(e.length!==i.length)return!1;for(var s=0;s G((f-r)/g)&&i("overflow"),r+=(p-t)*g,t=p,u=0;u =0?(c=b.substr(0,S),l=b.substr(S+1)):(c=b,l=""),d=decodeURIComponent(c),y=decodeURIComponent(l),a(o,d)?i(o[d])?o[d].push(y):o[d]=[o[d],y]:o[d]=y}return o};var i=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},{}],448:[function(e,t,r){"use strict";function a(e,t){if(e.map)return e.map(t);for(var r=[],a=0;a >1,l=23===i?Math.pow(2,-24)-Math.pow(2,-77):0,d=a?0:s-1,y=a?1:-1,b=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(n=isNaN(t)?1:0,o=m):(o=Math.floor(Math.log(t)/Math.LN2),t*(u=Math.pow(2,-o))<1&&(o--,u*=2),t+=o+c>=1?l/u:l*Math.pow(2,1-c),t*u>=2&&(o++,u/=2),o+c>=m?(n=0,o=m):o+c>=1?(n=(t*u-1)*Math.pow(2,i),o+=c):(n=t*Math.pow(2,c-1)*Math.pow(2,i),o=0));i>=8;e[r+d]=255&n,d+=y,n/=256,i-=8);for(o=o<0;e[r+d]=255&o,d+=y,o/=256,p-=8);e[r+d-y]|=128*b}},{}],443:[function(e,t,r){var 
a={}.toString;t.exports=Array.isArray||function(e){return"[object Array]"==a.call(e)}},{}],444:[function(e,t,r){!function(e){"use strict";function t(e){return null!==e&&"[object Array]"===Object.prototype.toString.call(e)}function r(e){return null!==e&&"[object Object]"===Object.prototype.toString.call(e)}function a(e,i){if(e===i)return!0;if(Object.prototype.toString.call(e)!==Object.prototype.toString.call(i))return!1;if(!0===t(e)){if(e.length!==i.length)return!1;for(var s=0;s G((f-r)/g)&&i("overflow"),r+=(p-t)*g,t=p,u=0;u =0?(c=b.substr(0,S),l=b.substr(S+1)):(c=b,l=""),d=decodeURIComponent(c),y=decodeURIComponent(l),a(o,d)?i(o[d])?o[d].push(y):o[d]=[o[d],y]:o[d]=y}return o};var i=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},{}],448:[function(e,t,r){"use strict";function a(e,t){if(e.map)return e.map(t);for(var r=[],a=0;a=55296&&t<=56319&&i65535&&(e-=65536,t+=w(e>>>10&1023|55296),e=56320|1023&e),t+=w(e)}).join("")}function p(e){return e-48<10?e-22:e-65<26?e-65:e-97<26?e-97:T}function m(e,t){return e+22+75*(e<26)-((0!=t)<<5)}function c(e,t,r){var a=0;for(e=r?G(e/R):e>>1,e+=G(e/t);e>L*k>>1;a+=T)e=G(e/L);return G(a+(L+1)*e/(e+A))}function l(e){var t,r,a,s,o,n,m,l,d,y,b=[],S=e.length,g=0,h=v,I=D;for(r=e.lastIndexOf(x),r<0&&(r=0),a=0;a=S&&i("invalid-input"),l=p(e.charCodeAt(s++)),(l>=T||l>G((f-g)/n))&&i("overflow"),g+=l*n,d=m<=I?C:m>=I+k?k:m-I,!(l=t&&bf&&i("overflow"),b==t){for(l=r,d=T;y=d<=o?C:d>=o+k?k:d-o,!(l=0&&delete e.httpRequest.headers["Content-Length"]}function i(e){var t=new l,r=e.service.api.operations[e.operation].input;if(r.payload){var a={},i=r.members[r.payload];a=e.params[r.payload],"structure"===i.type?(e.httpRequest.body=t.build(a||{},i),s(e)):void 0!==a&&(e.httpRequest.body=a,("binary"===i.type||i.isStreaming)&&s(e,!0))}else e.httpRequest.body=t.build(e.params,r),s(e)}function s(e,t){if(!e.httpRequest.headers["Content-Type"]){var 
r=t?"binary/octet-stream":"application/json";e.httpRequest.headers["Content-Type"]=r}}function o(e){m.buildRequest(e),y.indexOf(e.httpRequest.method)<0&&i(e)}function n(e){c.extractError(e)}function u(e){m.extractData(e);var t,r=e.request,a=r.service.api.operations[r.operation],i=r.service.api.operations[r.operation].output||{};a.hasEventOutput;if(i.payload){var s=i.members[i.payload],o=e.httpResponse.body;if(s.isEventStream)t=new d,e.data[payload]=p.createEventStream(2===AWS.HttpClient.streamsApiVersion?e.httpResponse.stream:o,t,s);else if("structure"===s.type||"list"===s.type){var t=new d;e.data[i.payload]=t.parse(o,s)}else"binary"===s.type||s.isStreaming?e.data[i.payload]=o:e.data[i.payload]=s.toType(o)}else{var n=e.data;c.extractData(e),e.data=p.merge(n,e.data)}}var p=e("../util"),m=e("./rest"),c=e("./json"),l=e("../json/builder"),d=e("../json/parser"),y=["GET","HEAD","DELETE"];t.exports={buildRequest:o,extractError:n,extractData:u,unsetContentLength:a}},{"../json/builder":374,"../json/parser":375,"../util":428,"./json":386,"./rest":388}],390:[function(e,t,r){function a(e){var t=e.service.api.operations[e.operation].input,r=new n.XML.Builder,a=e.params,i=t.payload;if(i){var s=t.members[i];if(void 0===(a=a[i]))return;if("structure"===s.type){var o=s.name;e.httpRequest.body=r.toXML(a,s,o,!0)}else e.httpRequest.body=a}else e.httpRequest.body=r.toXML(a,t,t.name||t.shape||u.string.upperFirst(e.operation)+"Request")}function i(e){p.buildRequest(e),["GET","HEAD"].indexOf(e.httpRequest.method)<0&&a(e)}function s(e){p.extractError(e);var t;try{t=(new n.XML.Parser).parse(e.httpResponse.body.toString())}catch(r){t={Code:e.httpResponse.statusCode,Message:e.httpResponse.statusMessage}}t.Errors&&(t=t.Errors),t.Error&&(t=t.Error),t.Code?e.error=u.error(new Error,{code:t.Code,message:t.Message}):e.error=u.error(new Error,{code:e.httpResponse.statusCode,message:null})}function o(e){p.extractData(e);var 
t,r=e.request,a=e.httpResponse.body,i=r.service.api.operations[r.operation],s=i.output,o=(i.hasEventOutput,s.payload);if(o){var m=s.members[o];m.isEventStream?(t=new n.XML.Parser,e.data[o]=u.createEventStream(2===n.HttpClient.streamsApiVersion?e.httpResponse.stream:e.httpResponse.body,t,m)):"structure"===m.type?(t=new n.XML.Parser,e.data[o]=t.parse(a.toString(),m)):"binary"===m.type||m.isStreaming?e.data[o]=a:e.data[o]=m.toType(a)}else if(a.length>0){t=new n.XML.Parser;var c=t.parse(a.toString(),s);u.update(e.data,c)}}var n=e("../core"),u=e("../util"),p=e("./rest");t.exports={buildRequest:i,extractError:s,extractData:o}},{"../core":350,"../util":428,"./rest":388}],391:[function(e,t,r){function a(){}function i(e){return e.isQueryName||"ec2"!==e.api.protocol?e.name:e.name[0].toUpperCase()+e.name.substr(1)}function s(e,t,r,a){p.each(r.members,function(r,s){var o=t[r];if(null!==o&&void 0!==o){var n=i(s);n=e?e+"."+n:n,u(n,o,s,a)}})}function o(e,t,r,a){var i=1;p.each(t,function(t,s){var o=r.flattened?".":".entry.",n=o+i+++".",p=n+(r.key.name||"key"),m=n+(r.value.name||"value");u(e+p,t,r.key,a),u(e+m,s,r.value,a)})}function n(e,t,r,a){var s=r.member||{};if(0===t.length)return void a.call(this,e,null);p.arrayEach(t,function(t,o){var n="."+(o+1);if("ec2"===r.api.protocol)n+="";else if(r.flattened){if(s.name){var p=e.split(".");p.pop(),p.push(i(s)),e=p.join(".")}}else n="."+(s.name?s.name:"member")+n;u(e+n,t,s,a)})}function u(e,t,r,a){null!==t&&void 0!==t&&("structure"===r.type?s(e,t,r,a):"list"===r.type?n(e,t,r,a):"map"===r.type?o(e,t,r,a):a(e,r.toWireFormat(t).toString()))}var p=e("../util");a.prototype.serialize=function(e,t,r){s("",e,t,r)},t.exports=a},{"../util":428}],392:[function(e,t,r){var 
a=e("../core"),i=null,s={signatureVersion:"v4",signingName:"rds-db",operations:{}},o={region:"string",hostname:"string",port:"number",username:"string"};a.RDS.Signer=a.util.inherit({constructor:function(e){this.options=e||{}},convertUrlToAuthToken:function(e){if(0===e.indexOf("https://"))return e.substring("https://".length)},getAuthToken:function(e,t){"function"==typeof e&&void 0===t&&(t=e,e={});var r=this,o="function"==typeof t;e=a.util.merge(this.options,e);var n=this.validateAuthTokenOptions(e);if(!0!==n){if(o)return t(n,null);throw n}var u={region:e.region,endpoint:new a.Endpoint(e.hostname+":"+e.port),paramValidation:!1,signatureVersion:"v4"};e.credentials&&(u.credentials=e.credentials),i=new a.Service(u),i.api=s;var p=i.makeRequest();if(this.modifyRequestForAuthToken(p,e),!o){var m=p.presign(900);return this.convertUrlToAuthToken(m)}p.presign(900,function(e,a){a&&(a=r.convertUrlToAuthToken(a)),t(e,a)})},modifyRequestForAuthToken:function(e,t){e.on("build",e.buildAsGet),e.httpRequest.body=a.util.queryParamsToString({Action:"connect",DBUser:t.username})},validateAuthTokenOptions:function(e){var t="";e=e||{};for(var r in o)Object.prototype.hasOwnProperty.call(o,r)&&typeof e[r]!==o[r]&&(t+="option '"+r+"' should have been type '"+o[r]+"', was '"+typeof e[r]+"'.\n");return!t.length||a.util.error(new Error,{code:"InvalidParameter",message:t})}})},{"../core":350}],393:[function(e,t,r){t.exports={now:function(){return"undefined"!=typeof performance&&"function"==typeof performance.now?performance.now():Date.now()}}},{}],394:[function(e,t,r){function a(e){return"string"==typeof e&&(e.startsWith("fips-")||e.endsWith("-fips"))}function i(e){return"string"==typeof e&&["aws-global","aws-us-gov-global"].includes(e)}function 
s(e){return["fips-aws-global","aws-fips","aws-global"].includes(e)?"us-east-1":["fips-aws-us-gov-global","aws-us-gov-global"].includes(e)?"us-gov-west-1":e.replace(/fips-(dkr-|prod-)?|-fips/,"")}t.exports={isFipsRegion:a,isGlobalRegion:i,getRealRegion:s}},{}],395:[function(e,t,r){function a(e){if(!e)return null;var t=e.split("-");return t.length<3?null:t.slice(0,t.length-2).join("-")+"-*"}function i(e){var t=e.config.region,r=a(t),i=e.api.endpointPrefix;return[[t,i],[r,i],[t,"*"],[r,"*"],["*",i],[t,"internal-*"],["*","*"]].map(function(e){return e[0]&&e[1]?e.join("/"):null})}function s(e,t){u.each(t,function(t,r){"globalEndpoint"!==t&&(void 0!==e.config[t]&&null!==e.config[t]||(e.config[t]=r))})}function o(e){for(var t=i(e),r=e.config.useFipsEndpoint,a=e.config.useDualstackEndpoint,o=0;o=a())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+a().toString(16)+" bytes");return 0|e}function b(e){return+e!=e&&(e=0),s.alloc(+e)}function S(e,t){if(s.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var r=e.length;if(0===r)return 0;for(var a=!1;;)switch(t){case"ascii":case"latin1":case"binary":return r;case"utf8":case"utf-8":case void 0:return K(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*r;case"hex":return r>>>1;case"base64":return H(e).length;default:if(a)return K(e).length;t=(""+t).toLowerCase(),a=!0}}function g(e,t,r){var a=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===r||r>this.length)&&(r=this.length),r<=0)return"";if(r>>>=0,t>>>=0,r<=t)return"";for(e||(e="utf8");;)switch(e){case"hex":return E(this,t,r);case"utf8":case"utf-8":return v(this,t,r);case"ascii":return P(this,t,r);case"latin1":case"binary":return q(this,t,r);case"base64":return D(this,t,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return M(this,t,r);default:if(a)throw new 
TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),a=!0}}function h(e,t,r){var a=e[t];e[t]=e[r],e[r]=a}function I(e,t,r,a,i){if(0===e.length)return-1;if("string"==typeof r?(a=r,r=0):r>2147483647?r=2147483647:r<-2147483648&&(r=-2147483648),r=+r,isNaN(r)&&(r=i?0:e.length-1),r<0&&(r=e.length+r),r>=e.length){if(i)return-1;r=e.length-1}else if(r<0){if(!i)return-1;r=0}if("string"==typeof t&&(t=s.from(t,a)),s.isBuffer(t))return 0===t.length?-1:N(e,t,r,a,i);if("number"==typeof t)return t&=255,s.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?i?Uint8Array.prototype.indexOf.call(e,t,r):Uint8Array.prototype.lastIndexOf.call(e,t,r):N(e,[t],r,a,i);throw new TypeError("val must be string, number or Buffer")}function N(e,t,r,a,i){function s(e,t){return 1===o?e[t]:e.readUInt16BE(t*o)}var o=1,n=e.length,u=t.length;if(void 0!==a&&("ucs2"===(a=String(a).toLowerCase())||"ucs-2"===a||"utf16le"===a||"utf-16le"===a)){if(e.length<2||t.length<2)return-1;o=2,n/=2,u/=2,r/=2}var p;if(i){var m=-1;for(p=r;p>>8*(a?i:1-i)}function B(e,t,r,a){t<0&&(t=4294967295+t+1);for(var i=0,s=Math.min(e.length-r,4);i>>8*(a?i:3-i)&255}function U(e,t,r,a,i,s){if(r+a>e.length)throw new RangeError("Index out of range");if(r<0)throw new RangeError("Index out of range")}function _(e,t,r,a,i){return i||U(e,t,r,4,3.4028234663852886e38,-3.4028234663852886e38),X.write(e,t,r,a,23,4),r+4}function F(e,t,r,a,i){return i||U(e,t,r,8,1.7976931348623157e308,-1.7976931348623157e308),X.write(e,t,r,a,52,8),r+8}function O(e){if(e=V(e).replace(ee,""),e.length<2)return"";for(;e.length%4!=0;)e+="=";return e}function V(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}function z(e){return e<16?"0"+e.toString(16):e.toString(16)}function K(e,t){t=t||1/0;for(var 
r,a=e.length,i=null,s=[],o=0;o55295&&r<57344){if(!i){if(r>56319){(t-=3)>-1&&s.push(239,191,189);continue}if(o+1===a){(t-=3)>-1&&s.push(239,191,189);continue}i=r;continue}if(r<56320){(t-=3)>-1&&s.push(239,191,189),i=r;continue}r=65536+(i-55296<<10|r-56320)}else i&&(t-=3)>-1&&s.push(239,191,189);if(i=null,r<128){if((t-=1)<0)break;s.push(r)}else if(r<2048){if((t-=2)<0)break;s.push(r>>6|192,63&r|128)}else if(r<65536){if((t-=3)<0)break;s.push(r>>12|224,r>>6&63|128,63&r|128)}else{if(!(r<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;s.push(r>>18|240,r>>12&63|128,r>>6&63|128,63&r|128)}}return s}function j(e){for(var t=[],r=0;r=55296&&t<=56319&&i65535&&(e-=65536,t+=w(e>>>10&1023|55296),e=56320|1023&e),t+=w(e)}).join("")}function p(e){return e-48<10?e-22:e-65<26?e-65:e-97<26?e-97:T}function m(e,t){return e+22+75*(e<26)-((0!=t)<<5)}function c(e,t,r){var a=0;for(e=r?G(e/R):e>>1,e+=G(e/t);e>L*k>>1;a+=T)e=G(e/L);return G(a+(L+1)*e/(e+A))}function l(e){var t,r,a,s,o,n,m,l,d,y,b=[],S=e.length,g=0,h=v,I=D;for(r=e.lastIndexOf(x),r<0&&(r=0),a=0;a=S&&i("invalid-input"),l=p(e.charCodeAt(s++)),(l>=T||l>G((f-g)/n))&&i("overflow"),g+=l*n,d=m<=I?C:m>=I+k?k:m-I,!(l=t&&bf&&i("overflow"),b==t){for(l=r,d=T;y=d<=o?C:d>=o+k?k:d-o,!(l