From 62c27b55b6c2bf04d0ae8ef9debb629b6e5b8db7 Mon Sep 17 00:00:00 2001
From: George Fu
 *
 * Enables performance mode for the branch.
 * Performance mode optimizes for faster hosting performance by keeping content cached at
 * the edge for a longer interval. When performance mode is enabled, hosting configuration
 * or code changes can take up to 10 minutes to roll out.
 *
 * The status code for a URL rewrite or redirect rule.
 * Represents a 200 rewrite rule.
 * Represents a 301 (moved permanently) redirect rule. This and all future
 * requests should be directed to the target URL.
 * Represents a 302 temporary redirect rule.
 * Represents a 404 redirect rule.
 * Represents a 404 rewrite rule.
 *
 * The OAuth token for a third-party source control system for an Amplify app. The OAuth
 * token is used to create a webhook and a read-only deploy key using SSH cloning. The
 * OAuth token is not stored.
 * Use oauthToken for repository providers other than GitHub, such as
 * Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use
 * accessToken.
 * You must specify either oauthToken or accessToken when you create a new app.
 * Existing Amplify apps deployed from a GitHub repository using OAuth continue to work
 * with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub
 * App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the
 * Amplify User Guide.
 *
 * The personal access token for a GitHub repository for an Amplify app. The personal
 * access token is used to authorize access to a GitHub repository using the Amplify GitHub
 * App. The token is not stored.
 * Use accessToken for GitHub repositories only. To authorize access to a
 * repository provider such as Bitbucket or CodeCommit, use oauthToken.
 * You must specify either accessToken or oauthToken when you create a new app.
 * Existing Amplify apps deployed from a GitHub repository using OAuth continue to work
 * with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub
 * App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the
 * Amplify User Guide.
 *
 * This is for internal use.
 * The Amplify service uses this parameter to specify the authentication protocol to use to access
 * the Git repository for an Amplify app. Amplify specifies TOKEN for a GitHub
 * repository, SIGV4 for an Amazon Web Services CodeCommit repository, and SSH
 * for GitLab and Bitbucket repositories.
 *
 * Enables performance mode for the branch.
 * Performance mode optimizes for faster hosting performance by keeping content cached at
 * the edge for a longer interval. When performance mode is enabled, hosting configuration
 * or code changes can take up to 10 minutes to roll out.
 *
 * Enables performance mode for the branch.
 * Performance mode optimizes for faster hosting performance by keeping content cached at
 * the edge for a longer interval. When performance mode is enabled, hosting configuration
 * or code changes can take up to 10 minutes to roll out.
 *
 * The OAuth token for a third-party source control system for an Amplify app. The OAuth
 * token is used to create a webhook and a read-only deploy key using SSH cloning. The
 * OAuth token is not stored.
 * Use oauthToken for repository providers other than GitHub, such as
 * Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use
 * accessToken.
 * You must specify either oauthToken or accessToken when you update an app.
 * Existing Amplify apps deployed from a GitHub repository using OAuth continue to work
 * with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub
 * App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the
 * Amplify User Guide.
 *
 * The personal access token for a GitHub repository for an Amplify app. The personal
 * access token is used to authorize access to a GitHub repository using the Amplify GitHub
 * App. The token is not stored.
 * Use accessToken for GitHub repositories only. To authorize access to a
 * repository provider such as Bitbucket or CodeCommit, use oauthToken.
 * You must specify either accessToken or oauthToken when you update an app.
 * Existing Amplify apps deployed from a GitHub repository using OAuth continue to work
 * with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub
 * App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the
 * Amplify User Guide.
 *
 * Enables performance mode for the branch.
 * Performance mode optimizes for faster hosting performance by keeping content cached at
 * the edge for a longer interval. When performance mode is enabled, hosting configuration
 * or code changes can take up to 10 minutes to roll out.
 *
 * CodeBuild is a fully managed build service in the cloud. CodeBuild compiles your source code,
* runs unit tests, and produces artifacts that are ready to deploy. CodeBuild eliminates the
* need to provision, manage, and scale your own build servers. It provides prepackaged
* build environments for the most popular programming languages and build tools, such as
* Apache Maven, Gradle, and more. You can also fully customize build environments in CodeBuild
* to use your own build tools. CodeBuild scales automatically to meet peak build requests. You
* pay only for the build time you consume. For more information about CodeBuild, see the
 * CodeBuild User
* Guide.
 * The SHA-256 hash of the build artifact.
 * You can use this hash along with a checksum tool to confirm file integrity and
 * authenticity. This value is available only if the build project's packaging value
 * is set to ZIP.
 *
 * The MD5 hash of the build artifact.
 * You can use this hash along with a checksum tool to confirm file integrity and
 * authenticity. This value is available only if the build project's packaging value
 * is set to ZIP.
 *
*/
status?: string;
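As an aside, the integrity check these hashes enable can be scripted with the SDK itself. A minimal TypeScript sketch, assuming a hypothetical build ID and a locally downloaded artifact path (both placeholders, and the digest encoding reported by CodeBuild should be confirmed against your own builds):

import { createHash } from "node:crypto";
import { readFileSync } from "node:fs";
import { CodeBuildClient, BatchGetBuildsCommand } from "@aws-sdk/client-codebuild";

const client = new CodeBuildClient({});

// Fetch the SHA-256 recorded for the build artifact
// (only present when the project's packaging is ZIP).
const { builds } = await client.send(
  new BatchGetBuildsCommand({ ids: ["my-project:example-build-id"] }) // hypothetical build ID
);
const expected = builds?.[0]?.artifacts?.sha256sum;

// Hash the locally downloaded artifact and compare; adjust the
// encoding (hex vs. base64) to match what your builds return.
const actual = createHash("sha256")
  .update(readFileSync("./artifact.zip")) // hypothetical local path
  .digest("hex");

console.log(expected === actual ? "artifact verified" : "hash mismatch");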
@@ -223,12 +222,12 @@ export interface CreateAppRequest {
+ * Use oauthToken for repository providers other than GitHub, such as
  * Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use
  * accessToken.
+ * You must specify either oauthToken or accessToken when you
  * create a new app.
+ * Use accessToken for GitHub repositories only. To authorize access to a
  * repository provider such as Bitbucket or CodeCommit, use oauthToken.
+ * You must specify either accessToken or oauthToken when you
  * create a new app.
+ * Amplify specifies TOKEN for a GitHub repository, SIGV4 for an
  * Amazon Web Services CodeCommit repository, and SSH for GitLab and Bitbucket repositories.
+ * Use oauthToken for repository providers other than GitHub, such as
  * Bitbucket or CodeCommit.
+ * You must specify either oauthToken or accessToken when you
  * update an app.
+ * Use accessToken for GitHub repositories only. To authorize access to a
  * repository provider such as Bitbucket or CodeCommit, use oauthToken.
+ * You must specify either accessToken or oauthToken when you
  * update an app.
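As a usage sketch (not part of the patch itself), creating an Amplify app from a GitHub repository with the Amplify GitHub App token might look like this; the app name, repository URL, and token source are placeholders:

import { AmplifyClient, CreateAppCommand } from "@aws-sdk/client-amplify";

const amplify = new AmplifyClient({});

// GitHub repositories use accessToken; for Bitbucket or CodeCommit,
// pass oauthToken instead. Neither token is stored by Amplify.
await amplify.send(
  new CreateAppCommand({
    name: "my-app", // hypothetical
    repository: "https://github.com/example/my-app", // hypothetical
    accessToken: process.env.GITHUB_PAT, // never hard-code tokens
  })
);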
 * To use this property, your CodeBuild service role must have the
 * s3:PutBucketAcl permission. This permission allows CodeBuild to modify
 * the access control list for the bucket.
 * This property can be one of the following values:
 * The bucket owner does not have access to the objects. This is the default.
 * The bucket owner has read-only access to the objects. The uploading account
 * retains ownership of the objects.
 * The bucket owner has full access to the objects. Object ownership is determined
 * by the following criteria:
 * If the bucket is configured with the Bucket owner preferred setting, the bucket
 * owner owns the objects. The uploading account will have object access as specified
 * by the bucket's policy.
 * Otherwise, the uploading account retains ownership of the objects.
 * For more information about Amazon S3 object ownership, see Controlling ownership of uploaded
 * objects using S3 Object Ownership in the Amazon Simple Storage Service User Guide.
 * The status of the build group.
 * The build group failed.
 * The build group faulted.
 * The build group is still in progress.
 * The build group stopped.
 * The build group succeeded.
 * The build group timed out.
 * The type of cache used by the build project. Valid values include:
 * NO_CACHE: The build project does not use any cache.
 * S3: The build project reads and writes from and to S3.
 * LOCAL: The build project stores a cache locally on a build host
 * that is only available to that build host.
 *
 * Information about the cache location:
 * NO_CACHE or LOCAL: This value is ignored.
 * S3: This is the S3 bucket name/prefix.
 * An array of strings that specify the local cache modes. You can use one or more local
 * cache modes at the same time. This is only used for LOCAL cache types.
 * Possible values are:
 * Caches Git metadata for primary and secondary sources. After the cache is
 * created, subsequent builds pull only the change between commits. This mode
 * is a good choice for projects with a clean working directory and a source
 * that is a large Git repository. If you choose this option and your project
 * does not use a Git repository (GitHub, GitHub Enterprise, or Bitbucket), the
 * option is ignored.
 * Caches existing Docker layers. This mode is a good choice for projects
 * that build or pull large Docker images. It can prevent the performance
 * issues caused by pulling large Docker images down from the network.
 * You can use a Docker layer cache in the Linux environment only.
 * The privileged flag must be set so that your
 * project has the required Docker permissions.
 * You should consider the security implications before you use a Docker layer cache.
 * Caches directories you specify in the buildspec file. This mode is a good
 * choice if your build scenario is not suited to one of the other three local
 * cache modes. If you use a custom cache:
 * Only directories can be specified for caching. You cannot specify individual files.
 * Symlinks are used to reference cached directories.
 * Cached directories are linked to your build before it downloads
 * its project sources. Cached items are overridden if a source item
 * has the same name. Directories are specified using cache paths in
 * the buildspec file.
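For illustration, a project that opts into the local source and Docker layer caches could be configured with a sketch like the following; the project name, repository, and role ARN are placeholders, and the Docker layer cache requires privileged mode per the note above:

import { CodeBuildClient, CreateProjectCommand } from "@aws-sdk/client-codebuild";

const codebuild = new CodeBuildClient({});

await codebuild.send(
  new CreateProjectCommand({
    name: "cached-project", // hypothetical
    serviceRole: "arn:aws:iam::123456789012:role/codebuild-role", // hypothetical
    source: { type: "GITHUB", location: "https://github.com/example/repo" },
    artifacts: { type: "NO_ARTIFACTS" },
    environment: {
      type: "LINUX_CONTAINER",
      image: "aws/codebuild/standard:4.0",
      computeType: "BUILD_GENERAL1_SMALL",
      privilegedMode: true, // required for the Docker layer cache
    },
    cache: {
      type: "LOCAL",
      modes: ["LOCAL_SOURCE_CACHE", "LOCAL_DOCKER_LAYER_CACHE"],
    },
  })
);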
 * The value of the environment variable.
 * We strongly discourage the use of PLAINTEXT environment variables to
 * store sensitive values, especially Amazon Web Services secret key IDs and secret access keys.
 * PLAINTEXT environment variables can be displayed in plain text
 * using the CodeBuild console and the CLI. For sensitive values, we recommend you use an
 * environment variable of type PARAMETER_STORE or SECRETS_MANAGER.
 * The type of environment variable. Valid values include:
 * PARAMETER_STORE: An environment variable stored in Systems Manager
 * Parameter Store. To learn how to specify a parameter store environment variable,
 * see env/parameter-store in the CodeBuild User Guide.
 * PLAINTEXT: An environment variable in plain text format. This is the default value.
 * SECRETS_MANAGER: An environment variable stored in Secrets Manager. To learn how to
 * specify a secrets manager environment variable, see env/secrets-manager in the
 * CodeBuild User Guide.
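A hedged sketch of the three variable types side by side; the parameter and secret names are placeholders:

import type { EnvironmentVariable } from "@aws-sdk/client-codebuild";

// PLAINTEXT is fine for non-sensitive values; secrets should come from
// Parameter Store or Secrets Manager so they never appear in the console or CLI.
const environmentVariables: EnvironmentVariable[] = [
  { name: "STAGE", value: "prod", type: "PLAINTEXT" },
  { name: "DB_PASSWORD", value: "/my-app/db-password", type: "PARAMETER_STORE" }, // hypothetical parameter name
  { name: "API_KEY", value: "my-app/api-key", type: "SECRETS_MANAGER" }, // hypothetical secret name
];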
 * Information about credentials that provide access to a private Docker registry. When
 * this is set:
 * imagePullCredentialsType must be set to SERVICE_ROLE.
 * images cannot be curated or an Amazon ECR image.
 * For more information, see Private Registry with
 * Secrets Manager Sample for CodeBuild.
 *
 * The Amazon Resource Name (ARN) or name of credentials created using Secrets Manager.
 * The credential can use the name of the credentials only if they
 * exist in your current Amazon Web Services Region.
 *
 * The type of build environment to use for related builds.
 * The environment type ARM_CONTAINER is available only in regions
 * US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland),
 * Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and
 * EU (Frankfurt).
 * The environment type LINUX_CONTAINER with compute type
 * build.general1.2xlarge is available only in regions
 * US East (N. Virginia), US East (Ohio), US West (Oregon),
 * Canada (Central), EU (Ireland), EU (London), and
 * China (Ningxia).
@@ -834,7 +834,7 @@ export interface ProjectEnvironment {
 * The environment type LINUX_GPU_CONTAINER is available only in
 * regions US East (N. Virginia), US East (Ohio), US West (Oregon),
 * Canada (Central), EU (Ireland), EU (London),
 * EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), and
 * China (Ningxia).
@@ -842,15 +842,15 @@ export interface ProjectEnvironment {
 * The environment types WINDOWS_CONTAINER and
 * WINDOWS_SERVER_2019_CONTAINER are available only in regions
 * US East (N. Virginia), US East (Ohio), US West (Oregon), and
 * EU (Ireland).
 * For more information, see Build environment compute types in the CodeBuild
 * user guide.
 *
 * The image tag or image digest that identifies the Docker image to use for this build
 * project. Use the following formats:
 * For an image tag: registry/repository:tag. For
 * example, in the Docker repository that CodeBuild uses to manage its Docker
 * images, this would be aws/codebuild/standard:4.0.
 * For an image digest: registry/repository@digest.
 * For example, to specify an image with the digest
 * "sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf," use
 * registry/repository@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf.
 * For more information, see Docker images provided by CodeBuild in the CodeBuild user
 * guide.
 *
 * Information about the compute resources the build project uses. Available values
 * include:
 * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds.
 * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds.
 * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for
 * builds, depending on your environment type.
 * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and
 * 824 GB of SSD storage for builds. This compute type supports Docker images up to
 * 100 GB uncompressed.
 * If you use BUILD_GENERAL1_LARGE:
 * For environment type LINUX_CONTAINER, you can use up to 15 GB
 * memory and 8 vCPUs for builds.
 * For environment type LINUX_GPU_CONTAINER, you can use up to 255
 * GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
 * For environment type ARM_CONTAINER, you can use up to 16 GB
 * memory and 8 vCPUs on ARM-based processors for builds.
 * For more information, see Build Environment
 * Compute Types in the CodeBuild User Guide.
 *
 * Enables running the Docker daemon inside a Docker container. Set to true only if the
 * build project is used to build Docker images. Otherwise, a build that attempts to
 * interact with the Docker daemon fails. The default setting is false.
 * You can initialize the Docker daemon during the install phase of your build by adding
 * one of the following sets of commands to the install phase of your buildspec file:
 * If the operating system's base image is Ubuntu Linux:
 * - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&
 * - timeout 15 sh -c "until docker info; do echo .; sleep 1; done"
 * If the operating system's base image is Alpine Linux and the previous command does not
 * work, add the -t argument to timeout:
 * - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&
 * - timeout -t 15 sh -c "until docker info; do echo .; sleep 1; done"
 *
 * The type of credentials CodeBuild uses to pull images in your build. There are two valid
 * values:
 * CODEBUILD specifies that CodeBuild uses its own credentials.
 * This requires that you modify your ECR repository policy to trust the CodeBuild service principal.
 * SERVICE_ROLE specifies that CodeBuild uses your build project's service role.
 * When you use a cross-account or private registry image, you must use SERVICE_ROLE
 * credentials. When you use a CodeBuild curated image, you must use CODEBUILD credentials.
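Putting the environment settings above together, a build environment that pulls a private image via Secrets Manager credentials might be sketched as follows; the image name and secret ARN are placeholders:

import type { ProjectEnvironment } from "@aws-sdk/client-codebuild";

const environment: ProjectEnvironment = {
  type: "LINUX_CONTAINER",
  computeType: "BUILD_GENERAL1_MEDIUM",
  image: "registry.example.com/build/base:latest", // hypothetical private image
  imagePullCredentialsType: "SERVICE_ROLE", // required whenever registryCredential is set
  registryCredential: {
    credential: "arn:aws:secretsmanager:us-west-2:123456789012:secret:dockerhub-abc123", // hypothetical
    credentialProvider: "SECRETS_MANAGER",
  },
};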
 */
@@ -1013,7 +1013,7 @@ export type FileSystemType = (typeof FileSystemType)[keyof typeof FileSystemType]
 * Information about a file system created by Amazon Elastic File System (EFS). For more
 * information, see What Is Amazon Elastic File System?
 */
export interface ProjectFileSystemLocation {
  /**
   * @public
   * For example, if the DNS name of a
   * file system is fs-abcd1234.efs.us-west-2.amazonaws.com, and its mount
   * directory is my-efs-mount-directory, then the location is
   * fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory.
   * The directory path in the format efs-dns-name:/directory-path is
   * optional. If you do not specify a directory path, the location is only the DNS name and
   * CodeBuild mounts the entire file system.
   *
   * CodeBuild creates an environment variable by appending the identifier in all capital
   * letters to CODEBUILD_. For example, if you specify my_efs for
   * identifier, a new environment variable is created named
   * CODEBUILD_MY_EFS.
   * The identifier is used to mount your file system.
   */
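A minimal sketch using the example values from the doc text above; the mount point inside the build container is an assumption:

import type { ProjectFileSystemLocation } from "@aws-sdk/client-codebuild";

// identifier "my_efs" becomes the CODEBUILD_MY_EFS environment variable.
const fileSystemLocations: ProjectFileSystemLocation[] = [
  {
    type: "EFS",
    location: "fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory",
    mountPoint: "/mnt/efs", // hypothetical mount point
    identifier: "my_efs",
  },
];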
 * The current status of the logs in CloudWatch Logs for a build project. Valid values are:
 * ENABLED: CloudWatch Logs are enabled for this build project.
 * DISABLED: CloudWatch Logs are not enabled for this build project.
 *
 * The current status of the S3 build logs. Valid values are:
 * ENABLED: S3 build logs are enabled for this build project.
 * DISABLED: S3 build logs are not enabled for this build project.
 * To use this property, your CodeBuild service role must have the
 * s3:PutBucketAcl permission. This permission allows CodeBuild to modify
 * the access control list for the bucket.
 * This property can be one of the following values:
 * The bucket owner does not have access to the objects. This is the default.
 * The bucket owner has read-only access to the objects. The uploading account
 * retains ownership of the objects.
 * The bucket owner has full access to the objects. Object ownership is determined
 * by the following criteria:
 * If the bucket is configured with the Bucket owner preferred setting, the bucket
 * owner owns the objects. The uploading account will have object access as specified
 * by the bucket's policy.
 * Otherwise, the uploading account retains ownership of the objects.
 * For more information about Amazon S3 object ownership, see Controlling ownership of uploaded
 * objects using S3 Object Ownership in the Amazon Simple Storage Service User Guide.
 *
 * The name of the batch build phase. Valid values include:
 * Build output artifacts are being combined and uploaded to the output location.
 * The batch build specification is being downloaded.
 * One or more of the builds failed.
 * The batch build is in progress.
 * The batch build was stopped.
 * The batch build has been submitted.
 * The batch build succeeded.
 */
phaseType?: BuildBatchPhaseType | string;
@@ -1305,31 +1305,31 @@ export interface BuildBatchPhase {
/**
 * @public
 * The current status of the batch build phase. Valid values include:
 * The build phase failed.
 * The build phase faulted.
 * The build phase is still in progress.
 * The build phase stopped.
 * The build phase succeeded.
 * The build phase timed out.
 */
phaseStatus?: StatusType | string;
@@ -1378,7 +1378,7 @@ export type SourceAuthType = (typeof SourceAuthType)[keyof typeof SourceAuthType]
 * @public
 * Information about the authorization settings for CodeBuild to access the source code to be
 * built.
 * This information is for the CodeBuild console's use only. Your code should not get or set
 * this information directly.
 * This data type is deprecated and is no longer accurate or used.
 * The authorization type to use. The only valid value is OAUTH, which
 * represents the OAuth authorization type.
 */
context?: string;
@@ -1428,17 +1428,17 @@ export interface BuildStatusConfig {
 * @public
 * Specifies the context of the build status CodeBuild sends to the source provider. The
 * usage of this parameter depends on the source provider.
 * This parameter is used for the name parameter in the
 * Bitbucket commit status. For more information, see build in the Bitbucket API documentation.
 * This parameter is used for the context parameter in the
 * GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.
 */
targetUrl?: string;
@@ -1485,36 +1485,36 @@ export interface ProjectSource {
 * @public
 * Specifies the target url of the build status CodeBuild sends to the source provider. The
 * usage of this parameter depends on the source provider.
 * This parameter is used for the url parameter in the Bitbucket
 * commit status. For more information, see build in the Bitbucket API documentation.
 * This parameter is used for the target_url parameter in the
 * GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.
@@ -1525,33 +1525,33 @@ export interface ProjectSource {
 * @public
 * The type of repository that contains the source code to be built. Valid values include:
 * BITBUCKET: The source code is in a Bitbucket repository.
 * CODECOMMIT: The source code is in a CodeCommit repository.
 * CODEPIPELINE: The source code settings are specified in the
 * source action of a pipeline in CodePipeline.
 * GITHUB: The source code is in a GitHub or GitHub Enterprise Cloud repository.
 * GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise Server repository.
 * NO_SOURCE: The project does not have input source code.
 * S3: The source code is in an Amazon S3 bucket.
 *
 * Information about the location of the source code to be built. Valid values include:
 * For source code settings that are specified in the source action of a pipeline
 * in CodePipeline, location should not be specified. If it is specified,
 * CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source
 * action instead of this value.
 * For source code in a CodeCommit repository, the HTTPS clone URL to the repository
 * that contains the source code and the buildspec file (for example,
 * https://git-codecommit.).
 * For source code in an Amazon S3 input bucket, one of the following.
 * The path to the ZIP file that contains the source code (for example, ).
 * The path to the folder that contains the source code (for example, ).
 * For source code in a GitHub repository, the HTTPS clone URL to the repository
 * that contains the source and the buildspec file. You must connect your Amazon Web Services account
 * to your GitHub account. Use the CodeBuild console to start creating a build
 * project. When you use the console to connect (or reconnect) with GitHub, on the
@@ -1565,7 +1565,7 @@ export interface ProjectSource {
 * For source code in a Bitbucket repository, the HTTPS clone URL to the
 * repository that contains the source and the buildspec file. You must connect
 * your Amazon Web Services account to your Bitbucket account. Use the CodeBuild console to start
 * creating a build project. When you use the console to connect (or reconnect)
@@ -1578,7 +1578,7 @@ export interface ProjectSource {
 * If you specify CODEPIPELINE for the Type property, don't specify this
 * property. For all of the other types, you must specify Location.
 *
 * The buildspec file declaration to use for the builds in this build project.
 * If this value is set, it can be either an inline buildspec definition, the path to an
 * alternate buildspec file relative to the value of the built-in
 * CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket.
 * The bucket must be in the same Amazon Web Services Region as the build project. Specify the buildspec
@@ -1615,7 +1615,7 @@ export interface ProjectSource {
 * @public
 * Information about the authorization settings for CodeBuild to access the source code to be
 * built.
 * This information is for the CodeBuild console's use only. Your code should not get or set
 * this information directly.
 * The authorization type to use. The only valid value is OAUTH.
 * If this is set and you use a different source provider, an
 * invalidInputException is thrown.
 * To be able to report the build status to the source provider, the user associated with the source provider must
 * have write access to the repo. If the user does not have write access, the build status cannot be updated.
 * For more information, see Source provider access in the CodeBuild User Guide.
 * The status of a build triggered by a webhook is always reported to your source provider.
 * If your project's builds are triggered by a webhook, you must push a
 * new commit to the repo for a change to this property to take effect.
+ *For CodeCommit: the commit ID, branch, or Git tag to use.
*For GitHub: the commit ID, pull request ID, branch name, or tag name that + *
For GitHub: the commit ID, pull request ID, branch name, or tag name that
* corresponds to the version of the source code you want to build. If a pull
* request ID is specified, it must use the format pr/pull-request-ID
* (for example, pr/25
). If a branch name is specified, the branch's
@@ -1689,13 +1689,13 @@ export interface ProjectSourceVersion {
* used.
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the + *
For Bitbucket: the commit ID, branch name, or tag name that corresponds to the * version of the source code you want to build. If a branch name is specified, the * branch's HEAD commit ID is used. If not specified, the default branch's HEAD * commit ID is used.
*For Amazon S3: the version ID of the object that represents the build input ZIP + *
For Amazon S3: the version ID of the object that represents the build input ZIP * file to use.
*The identifier of the resolved version of this batch build's source code.
- *For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.
+ *For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID.
*For CodePipeline, the source revision provided by CodePipeline.
+ *For CodePipeline, the source revision provided by CodePipeline.
*For Amazon S3, this does not apply.
+ *For Amazon S3, this does not apply.
*The entity that started the batch build. Valid values include:
- *If CodePipeline started the build, the pipeline's name (for example, + *
If CodePipeline started the build, the pipeline's name (for example,
* codepipeline/my-demo-pipeline
).
If an IAM user started the build, the user's name.
+ *If an IAM user started the build, the user's name.
*If the Jenkins plugin for CodeBuild started the build, the string + *
If the Jenkins plugin for CodeBuild started the build, the string
* CodeBuild-Jenkins-Plugin
.
The Key Management Service customer master key (CMK) to be used for encrypting the batch build output * artifacts.
- *You can use a cross-account KMS key to encrypt the build output artifacts if your * service role has permission to that key.
- *You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using + *
You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using
* the format alias/
).
Contains information about an exported environment variable.
- *Exported environment variables are used in conjunction with CodePipeline to export + *
Exported environment variables are used in conjunction with CodePipeline to export * environment variables from the current build stage to subsequent stages in the pipeline. * For more information, see Working with variables in the CodePipeline User Guide.
- * During a build, the value of a variable is available starting with the
* install
phase. It can be updated between the start of the
* install
phase and the end of the post_build
phase.
@@ -2170,52 +2170,52 @@ export interface BuildPhase {
/**
* @public
*
The name of the build phase. Valid values include:
- *Core build activities typically occur in this build phase.
- *Core build activities typically occur in this build phase.
+ * *The build has been completed.
- *The build has been completed.
+ * *Source code is being downloaded in this build phase.
- *Source code is being downloaded in this build phase.
+ * *The build process is completing in this build phase.
- *The build process is completing in this build phase.
+ * *Installation activities typically occur in this build phase.
- *Installation activities typically occur in this build phase.
+ * *Post-build activities typically occur in this build phase.
- *Post-build activities typically occur in this build phase.
+ * *Pre-build activities typically occur in this build phase.
- *Pre-build activities typically occur in this build phase.
+ * *The build environment is being set up.
- *The build environment is being set up.
+ * *The build has been submitted and is queued behind other submitted + *
The build has been submitted and is queued behind other submitted * builds.
- *The build has been submitted.
- *The build has been submitted.
+ * *Build output artifacts are being uploaded to the output location.
- *Build output artifacts are being uploaded to the output location.
+ * *The current status of the build phase. Valid values include:
- *The build phase failed.
- *The build phase failed.
+ * *The build phase faulted.
- *The build phase faulted.
+ * *The build phase is still in progress.
- *The build phase is still in progress.
+ * *The build phase stopped.
- *The build phase stopped.
+ * *The build phase succeeded.
- *The build phase succeeded.
+ * *The build phase timed out.
- *The build phase timed out.
+ * *The current status of the build. Valid values include:
- *+ *
* FAILED
: The build failed.
+ *
* FAULT
: The build faulted.
+ *
* IN_PROGRESS
: The build is still in progress.
+ *
* STOPPED
: The build stopped.
+ *
* SUCCEEDED
: The build succeeded.
+ *
* TIMED_OUT
: The build timed out.
Any version identifier for the version of the source code to be built. If
* sourceVersion
is specified at the project level, then this
* sourceVersion
(at the build level) takes precedence.
For more information, see Source Version Sample
+ * For more information, see Source Version Sample
* with CodeBuild in the CodeBuild User Guide. An identifier for the version of this build's source code. For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID. For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID. For CodePipeline, the source revision provided by CodePipeline. For CodePipeline, the source revision provided by CodePipeline. For Amazon S3, this does not apply. For Amazon S3, this does not apply. An array of For CodeCommit: the commit ID, branch, or Git tag to use. For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub: the commit ID, pull request ID, branch name, or tag name that
+ * For GitHub: the commit ID, pull request ID, branch name, or tag name that
* corresponds to the version of the source code you want to build. If a pull
* request ID is specified, it must use the format For Bitbucket: the commit ID, branch name, or tag name that corresponds to the
+ * For Bitbucket: the commit ID, branch name, or tag name that corresponds to the
* version of the source code you want to build. If a branch name is specified, the
* branch's HEAD commit ID is used. If not specified, the default branch's HEAD
* commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP
+ * For Amazon S3: the version ID of the object that represents the build input ZIP
* file to use. The entity that started the build. Valid values include: If CodePipeline started the build, the pipeline's name (for example,
+ * If CodePipeline started the build, the pipeline's name (for example,
* If an IAM user started the build, the user's name (for example,
+ * If an IAM user started the build, the user's name (for example,
* If the Jenkins plugin for CodeBuild started the build, the string
+ * If the Jenkins plugin for CodeBuild started the build, the string
* The Key Management Service customer master key (CMK) to be used for encrypting the build output
* artifacts. You can use a cross-account KMS key to encrypt the build output artifacts if your
* service role has permission to that key. You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using
+ * You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using
* the format A list of exported environment variables for this build. Exported environment variables are used in conjunction with CodePipeline to export
+ * Exported environment variables are used in conjunction with CodePipeline to export
* environment variables from the current build stage to subsequent stages in the pipeline.
* For more information, see Working with variables in the CodePipeline User Guide. The type of build output artifact. Valid values include:
+ *
* The The
+ *
*
+ *
* Information about the build output artifact location: If If If If If If Along with If If If If If If For example, if For example, if Along with If If If If If If
+ *
*
+ *
* For example, if For example, if Along with If If If If If If For example: For example: If If If If If If The type of build output artifact to create: If If If If If If
+ *
*
+ *
*
+ *
*
*/
@@ -2411,12 +2411,12 @@ export interface Build {
* @public
* ProjectSourceVersion
objects. Each
* ProjectSourceVersion
must be one of:
+ *
*
@@ -2495,17 +2495,17 @@ export interface Build {
/**
* @public
* pr/pull-request-ID
* (for example, pr/25
). If a branch name is specified, the branch's
@@ -2424,13 +2424,13 @@ export interface Build {
* used.
+ *
*
@@ -2531,11 +2531,11 @@ export interface Build {
* @public
* codepipeline/my-demo-pipeline
).MyUserName
).CodeBuild-Jenkins-Plugin
.alias/
).
+ *
*
@@ -2642,18 +2642,18 @@ export interface ProjectArtifacts {
/**
* @public
* CODEPIPELINE
: The build project has build output generated
* through CodePipeline. CODEPIPELINE
type is not supported for
+ * CODEPIPELINE
type is not supported for
* secondaryArtifacts
.NO_ARTIFACTS
: The build project does not produce any build
* output.S3
: The build project stores build output in Amazon S3.
+ *
*
@@ -2664,23 +2664,23 @@ export interface ProjectArtifacts {
* @public
* type
is set to CODEPIPELINE
, CodePipeline ignores this
+ * type
is set to CODEPIPELINE
, CodePipeline ignores this
* value if specified. This is because CodePipeline manages its build output locations
* instead of CodeBuild.type
is set to NO_ARTIFACTS
, this value is
+ * type
is set to NO_ARTIFACTS
, this value is
* ignored if specified, because no build output is produced.type
is set to S3
, this is the name of the output
+ * type
is set to S3
, this is the name of the output
* bucket.namespaceType
and name
, the pattern that CodeBuild
* uses to name and store the output artifact:
+ *
*
- * type
is set to CODEPIPELINE
, CodePipeline ignores this
+ * type
is set to CODEPIPELINE
, CodePipeline ignores this
* value if specified. This is because CodePipeline manages its build output names instead
* of CodeBuild.type
is set to NO_ARTIFACTS
, this value is
+ * type
is set to NO_ARTIFACTS
, this value is
* ignored if specified, because no build output is produced.type
is set to S3
, this is the path to the output
+ * type
is set to S3
, this is the path to the output
* artifact. If path
is not specified, path
is not
* used.path
is set to MyArtifacts
,
+ * path
is set to MyArtifacts
,
* namespaceType
is set to NONE
, and name
is set
* to MyArtifact.zip
, the output artifact is stored in the output bucket at
* MyArtifacts/MyArtifact.zip
.path
and name
, the pattern that CodeBuild uses to
* determine the name and location to store the output artifact:
+ *
- *
*
*
* type
is set to CODEPIPELINE
, CodePipeline ignores this
+ * type
is set to CODEPIPELINE
, CodePipeline ignores this
* value if specified. This is because CodePipeline manages its build output names instead
* of CodeBuild.type
is set to NO_ARTIFACTS
, this value is
+ * type
is set to NO_ARTIFACTS
, this value is
* ignored if specified, because no build output is produced.type
is set to S3
, valid values include:
+ *
type
is set to S3
, valid values include:
*
BUILD_ID
: Include the build ID in the location of the
* build output artifact.NONE
: Do not include the build ID. This is the default if
* namespaceType
is not specified.path
is set to MyArtifacts
,
+ * path
is set to MyArtifacts
,
* namespaceType
is set to BUILD_ID
, and name
is
* set to MyArtifact.zip
, the output artifact is stored in
* MyArtifacts/
.path
and namespaceType
, the pattern that CodeBuild
* uses to name and store the output artifact:
+ *
*
- * type
is set to CODEPIPELINE
, CodePipeline ignores this
+ * type
is set to CODEPIPELINE
, CodePipeline ignores this
* value if specified. This is because CodePipeline manages its build output names instead
* of CodeBuild.type
is set to NO_ARTIFACTS
, this value is
+ * type
is set to NO_ARTIFACTS
, this value is
* ignored if specified, because no build output is produced.type
is set to S3
, this is the name of the output
+ * type
is set to S3
, this is the name of the output
* artifact object. If you set the name to be a forward slash ("/"), the artifact
* is stored in the root of the output bucket.
+ *
@@ -2829,40 +2829,40 @@ export interface ProjectArtifacts {
* Amazon S3 bucket. By default, only the account that uploads the objects to the bucket has
* access to these objects. This property allows you to give the bucket owner access to
* these objects.
*
*
* path
is set to MyArtifacts
,
+ * path
is set to MyArtifacts
,
* namespaceType
is set to BUILD_ID
, and
* name
is set to MyArtifact.zip
, then the output
* artifact is stored in MyArtifacts/
. path
is empty, namespaceType
is set to
+ * path
is empty, namespaceType
is set to
* NONE
, and name
is set to "/
", the
* output artifact is stored in the root of the output bucket. path
is set to MyArtifacts
,
+ * path
is set to MyArtifacts
,
* namespaceType
is set to BUILD_ID
, and
* name
is set to "/
", the output artifact is stored
* in MyArtifacts/
.
+ *
*
type
is set to CODEPIPELINE
, CodePipeline ignores this
+ * type
is set to CODEPIPELINE
, CodePipeline ignores this
* value if specified. This is because CodePipeline manages its build output artifacts
* instead of CodeBuild.type
is set to NO_ARTIFACTS
, this value is
+ * type
is set to NO_ARTIFACTS
, this value is
* ignored if specified, because no build output is produced.type
is set to S3
, valid values include:
+ *
type
is set to S3
, valid values include:
*
NONE
: CodeBuild creates in the output bucket a folder that
* contains the build output. This is the default if packaging
* is not specified.ZIP
: CodeBuild creates in the output bucket a ZIP file that
* contains the build output.
To use this property, your CodeBuild service role must have the
* s3:PutBucketAcl
permission. This permission allows CodeBuild to modify
* the access control list for the bucket.
This property can be one of the following values:
+ *This property can be one of the following values:
*The bucket owner does not have access to the objects. This is the + *
The bucket owner does not have access to the objects. This is the * default.
*The bucket owner has read-only access to the objects. The uploading account + *
The bucket owner has read-only access to the objects. The uploading account * retains ownership of the objects.
*The bucket owner has full access to the objects. Object ownership is determined + *
The bucket owner has full access to the objects. Object ownership is determined * by the following criteria:
- *If the bucket is configured with the Bucket + *
If the bucket is configured with the Bucket * owner preferred setting, the bucket owner owns the * objects. The uploading account will have object access as specified * by the bucket's policy.
- *Otherwise, the uploading account retains ownership of the + *
Otherwise, the uploading account retains ownership of the * objects.
- *For more information about Amazon S3 object ownership, see Controlling ownership of uploaded objects using S3
+ * For more information about Amazon S3 object ownership, see Controlling ownership of uploaded objects using S3
* Object Ownership in the Amazon Simple Storage Service User
* Guide.
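A hedged sketch of the naming pattern described above; the bucket name is a placeholder:

import type { ProjectArtifacts } from "@aws-sdk/client-codebuild";

// With these settings the artifact lands at
// s3://my-artifact-bucket/MyArtifacts/<build-ID>/MyArtifact.zip
const artifacts: ProjectArtifacts = {
  type: "S3",
  location: "my-artifact-bucket", // hypothetical bucket
  path: "MyArtifacts",
  namespaceType: "BUILD_ID",
  name: "MyArtifact.zip",
  packaging: "ZIP",
  bucketOwnerAccess: "FULL", // give the bucket owner access to uploaded objects
};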
 * A tag, consisting of a key and a value.
 * This tag is available for use by Amazon Web Services services that support tags in CodeBuild.
 */
export interface Tag {
@@ -2966,12 +2966,12 @@ export interface WebhookFilter {
 * The type of webhook filter. There are six webhook filter types: EVENT,
 * ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF,
 * FILE_PATH, and COMMIT_MESSAGE.
 * EVENT: A webhook event triggers a build when the provided pattern
 * matches one of five event types: PUSH,
 * PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED,
 * PULL_REQUEST_REOPENED, and
@@ -2979,63 +2979,63 @@ export interface WebhookFilter {
 * specified as a comma-separated string. For example, PUSH,
 * PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull
 * request created, and pull request updated events.
 * The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only.
 * ACTOR_ACCOUNT_ID: A webhook event triggers a build when a GitHub, GitHub Enterprise, or
 * Bitbucket account ID matches the regular expression pattern.
 * HEAD_REF: A webhook event triggers a build when the head reference matches the
 * regular expression pattern. For example,
 * refs/heads/branch-name and refs/tags/tag-name.
 * Works with GitHub and GitHub Enterprise push, GitHub and GitHub
 * Enterprise pull request, Bitbucket push, and Bitbucket pull request events.
 * BASE_REF: A webhook event triggers a build when the base reference matches the
 * regular expression pattern. For example, refs/heads/branch-name.
 * Works with pull request events only.
 * FILE_PATH: A webhook triggers a build when the path of a changed file matches the
 * regular expression pattern.
 * Works with GitHub and Bitbucket push and pull request events.
 * Also works with GitHub Enterprise push events, but does not work with
 * GitHub Enterprise pull request events.
 * COMMIT_MESSAGE: A webhook triggers a build when the head commit message matches the
 * regular expression pattern.
 * Works with GitHub and Bitbucket push and pull request events.
 * Also works with GitHub Enterprise push events, but does not work with
 * GitHub Enterprise pull request events.
 *
 * For example, PUSH,
 * PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED allows all push, pull request
 * created, and pull request updated events to trigger a build.
 * For a WebHookFilter that uses any of the other filter types, a regular
 * expression pattern. For example, a WebHookFilter that uses
 * HEAD_REF for its type and the pattern
 * ^refs/heads/ triggers a build when the head reference is a branch with
@@ -3341,7 +3341,6 @@ export interface Project {
/**
* @public
*
 * Specifies the visibility of the project's builds. Possible values are:
 *
 * The type of build output artifact to create. Valid values include:
 * NONE: CodeBuild creates the raw data in the output bucket. This
 * is the default if packaging is not specified.
 * ZIP: CodeBuild creates a ZIP file with the raw data in the
 * output bucket.
 * The export configuration type. Valid values are:
 * S3: The report results are exported to an S3 bucket.
 * NO_EXPORT: The report results are not exported.
 *
 * A list of tag key and value pairs associated with this report group.
 * These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.
 */
tags?: Tag[];
@@ -3659,9 +3658,9 @@ export interface BatchGetReportsInput {
/**
 * @public
 * Contains a summary of a code coverage report.
 * Line coverage measures how many statements your tests cover. A statement is a single
 * instruction, not including comments, conditionals, etc.
 * Branch coverage determines if your tests cover every possible branch of a control
 * structure, such as an if or case statement.
 *
 * Specifies how the results are sorted. Possible values are:
 * The results are sorted by file path.
 * The results are sorted by the percentage of lines that are covered.
 *
 * Contains code coverage report information.
 * Line coverage measures how many statements your tests cover. A statement is a single
 * instruction, not including comments, conditionals, etc.
 * Branch coverage determines if your tests cover every possible branch of a control
 * structure, such as an if or case statement.
 * The status used to filter test cases. A TestCaseFilter can have one
 * status. Valid values are:
 * SUCCEEDED
 * FAILED
 * ERROR
 * SKIPPED
 * UNKNOWN
- *If this parameter is omitted, the most recent 100 reports are analyzed.
+ *If this parameter is omitted, the most recent 100 reports are analyzed.
*/ numOfReports?: number; @@ -4786,61 +4785,61 @@ export interface GetReportGroupTrendInput { *Accumulate the test run times for the specified + *
Accumulate the test run times for the specified * reports.
- *Accumulate the percentage of tests that passed for the + *
Accumulate the percentage of tests that passed for the * specified test reports.
- *Accumulate the total number of tests for the specified test + *
Accumulate the total number of tests for the specified test * reports.
- *Accumulate the branch coverage percentages for the specified + *
Accumulate the branch coverage percentages for the specified * test reports.
- *Accumulate the branches covered values for the specified test + *
Accumulate the branches covered values for the specified test * reports.
- *Accumulate the branches missed values for the specified test + *
Accumulate the branches missed values for the specified test * reports.
- *Accumulate the line coverage percentages for the specified + *
Accumulate the line coverage percentages for the specified * test reports.
- *Accumulate the lines covered values for the specified test + *
Accumulate the lines covered values for the specified test * reports.
- *Accumulate the lines not covered values for the specified test + *
Accumulate the lines not covered values for the specified test * reports.
- *Specifies the sort order of the returned items. Valid values include:
- *+ *
* ASCENDING
: List the batch build identifiers in ascending order by
* identifier.
+ *
* DESCENDING
: List the batch build identifiers in descending order
* by identifier.
The order to list build IDs. Valid values include:
- *+ *
* ASCENDING
: List the build IDs in ascending order by build
* ID.
+ *
* DESCENDING
: List the build IDs in descending order by build
* ID.
The order to sort the results in. The results are sorted by build number, not the build * identifier. If this is not specified, the results are sorted in descending order.
- *Valid values include:
- *Valid values include:
+ *+ *
* ASCENDING
: List the build identifiers in ascending order, by build number.
+ *
* DESCENDING
: List the build identifiers in descending order, by build number.
If the project has more than 100 builds, setting the sort order will result in an + *
If the project has more than 100 builds, setting the sort order will result in an * error.
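A sketch of listing recent builds in descending order by build number; the project name is a placeholder, and per the note above the sort order should be omitted for projects with more than 100 builds:

import { CodeBuildClient, ListBuildsForProjectCommand } from "@aws-sdk/client-codebuild";

const codebuild = new CodeBuildClient({});

const { ids } = await codebuild.send(
  new ListBuildsForProjectCommand({
    projectName: "my-project", // hypothetical
    sortOrder: "DESCENDING", // newest build numbers first
  })
);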
*/ sortOrder?: SortOrderType | string; @@ -5395,23 +5394,23 @@ export interface ListProjectsInput { /** * @public *The criterion to be used to list build project names. Valid values include:
* CREATED_TIME: List based on when each build project was created.
* LAST_MODIFIED_TIME: List based on when information about each * build project was last changed.
* NAME: List based on each build project's name.
Use sortOrder to specify in what order to list the build project names
* based on the preceding criteria.
The order in which to list build projects. Valid values include:
* ASCENDING: List in ascending order.
* DESCENDING: List in descending order.
Use sortBy to specify the criterion to be used to list build project
* names.
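Combined, the two parameters look like this in a call. A minimal sketch:

```ts
import { CodeBuildClient, ListProjectsCommand } from "@aws-sdk/client-codebuild";

const client = new CodeBuildClient({});

// Most recently modified project names first.
const { projects } = await client.send(
  new ListProjectsCommand({ sortBy: "LAST_MODIFIED_TIME", sortOrder: "DESCENDING" })
);
console.log(projects);
```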
The criterion to be used to list build projects shared with the current Amazon Web Services account * or user. Valid values include:
* ARN: List based on the ARN.
* MODIFIED_TIME: List based on when information about the shared * project was last changed.
The order in which to list shared build projects. Valid values include:
* ASCENDING: List in ascending order.
* DESCENDING: List in descending order.
The order in which to list shared report groups. Valid values include:
* ASCENDING: List in ascending order.
* DESCENDING: List in descending order.
The criterion to be used to list report groups shared with the current Amazon Web Services account or * user. Valid values include:
* ARN: List based on the ARN.
* MODIFIED_TIME: List based on when information about the shared * report group was last changed.
The version of the build input to be built, for this build only. If not specified, * the latest version is used. If specified, the contents depend on the source * provider:
* CodeCommit: The commit ID, branch, or Git tag to use.
* GitHub: The commit ID, pull request ID, branch name, or tag name that corresponds
* to the version of the source code you want to build. If a pull request ID is
* specified, it must use the format pr/pull-request-ID (for
* example pr/25). If a branch name is specified, the branch's
* HEAD commit ID is used. If not specified, the default branch's HEAD commit
* ID is used.
* Bitbucket: The commit ID, branch name, or tag name that corresponds to the version of * the source code you want to build. If a branch name is specified, the * branch's HEAD commit ID is used. If not specified, the default branch's HEAD * commit ID is used.
* Amazon S3: The version ID of the object that represents the build input ZIP file to * use.
If sourceVersion is specified at the project level, then this
* sourceVersion (at the build level) takes precedence.
For more information, see Source Version Sample
* with CodeBuild in the CodeBuild User Guide. A buildspec file declaration that overrides, for this build only, the latest one
* already defined in the build project. If this value is set, it can be either an inline buildspec definition, the path to an
+ * If this value is set, it can be either an inline buildspec definition, the path to an
* alternate buildspec file relative to the value of the built-in
* Set to true to report to your source provider the status of a build's start and
* completion. If you use this option with a source provider other than GitHub, GitHub
* Enterprise, or Bitbucket, an To be able to report the build status to the source provider, the user associated with the source provider must
+ * To be able to report the build status to the source provider, the user associated with the source provider must
* have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide. The status of a build triggered by a webhook is always reported to your source
* provider. The Key Management Service customer master key (CMK) that overrides the one specified in the build
* project. The CMK key encrypts the build output artifacts. You can use a cross-account KMS key to encrypt the build output artifacts if your
* service role has permission to that key. You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using
+ * You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using
* the format The type of credentials CodeBuild uses to pull images in your build. There are two valid
* values: Specifies that CodeBuild uses its own credentials. This requires that you
+ * Specifies that CodeBuild uses its own credentials. This requires that you
* modify your ECR repository policy to trust CodeBuild's service principal. Specifies that CodeBuild uses your build project's service role. Specifies that CodeBuild uses your build project's service role. When using a cross-account or private registry image, you must use
+ * When using a cross-account or private registry image, you must use
* The version of the batch build input to be built, for this build only. If not specified,
* the latest version is used. If specified, the contents depend on the source
* provider:
* CodeCommit: The commit ID, branch, or Git tag to use.
* GitHub: The commit ID, pull request ID, branch name, or tag name that corresponds
* to the version of the source code you want to build. If a pull request ID is
* specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
* Bitbucket: The commit ID, branch name, or tag name that corresponds to the version of
* the source code you want to build. If a branch name is specified, the
* branch's HEAD commit ID is used. If not specified, the default branch's HEAD
* commit ID is used.
* Amazon S3: The version ID of the object that represents the build input ZIP file to
* use.
The type of credentials CodeBuild uses to pull images in your batch build. There are two valid
* values:
* CODEBUILD: Specifies that CodeBuild uses its own credentials. This requires that you
* modify your ECR repository policy to trust CodeBuild's service principal.
* SERVICE_ROLE: Specifies that CodeBuild uses your build project's service role. When using a cross-account or private registry image, you must use SERVICE_ROLE credentials.
Specifies the visibility of the project's builds. Possible values are PUBLIC_READ and PRIVATE.
* CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket.
* The bucket must be in the same Amazon Web Services Region as the build project. Specify the buildspec
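The sourceVersion and image pull credential overrides above apply per call. A minimal sketch of how they might be passed with the AWS SDK for JavaScript v3 (the project name, tag, and PR number are hypothetical placeholders, not values from this diff):

```ts
import { CodeBuildClient, StartBuildCommand, StartBuildBatchCommand } from "@aws-sdk/client-codebuild";

const client = new CodeBuildClient({});

// Build one GitHub pull request; pr/<pull-request-ID> resolves to the PR's HEAD commit.
const { build } = await client.send(
  new StartBuildCommand({
    projectName: "my-project", // hypothetical
    sourceVersion: "pr/25",    // a commit ID, branch name, or tag also works
  })
);

// Batch build pinned to a Git tag, pulling images with the project's service role.
await client.send(
  new StartBuildBatchCommand({
    projectName: "my-project",                        // hypothetical
    sourceVersion: "v1.2.0",                          // hypothetical tag
    imagePullCredentialsTypeOverride: "SERVICE_ROLE", // needed for cross-account/private registry images
  })
);

console.log(build?.id);
```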
@@ -6216,12 +6215,12 @@ export interface StartBuildInput {
* invalidInputException
is thrown. alias/
).
+ *
*
- * SERVICE_ROLE
credentials. When using an CodeBuild curated image,
* you must use CODEBUILD
credentials.
+ *
*
* pr/pull-request-ID
(for
* example pr/25
). If a branch name is specified, the branch's
* HEAD commit ID is used. If not specified, the default branch's HEAD commit
* ID is used.sourceVersion
is specified at the project level, then this
* sourceVersion
(at the build level) takes precedence.
+ *
*
* SERVICE_ROLE
credentials. When using a CodeBuild curated image,
@@ -6926,7 +6925,6 @@ export interface UpdateProjectVisibilityInput {
/**
* @public
*
*
*
You can deploy a nearly unlimited variety of application content, such as an updated + *
You can deploy a nearly unlimited variety of application content, such as an updated * Lambda function, updated applications in an Amazon ECS service, * code, web and configuration files, executables, packages, scripts, multimedia files, and * so on. CodeDeploy can deploy application content stored in Amazon S3 * buckets, GitHub repositories, or Bitbucket repositories. You do not need to make changes * to your existing code before you can use CodeDeploy.
- *CodeDeploy makes it easier for you to rapidly release new features, helps + *
CodeDeploy makes it easier for you to rapidly release new features, helps * you avoid downtime during application deployment, and handles the complexity of updating * your applications, without many of the risks associated with error-prone manual * deployments.
- *+ *
* CodeDeploy Components - *
- *Use the information in this guide to help you work with the following CodeDeploy components:
- *Use the information in this guide to help you work with the following CodeDeploy components:
+ *- * Application: A name that uniquely identifies + *
+ * Application: A name that uniquely identifies * the application you want to deploy. CodeDeploy uses this name, which * functions as a container, to ensure the correct combination of revision, * deployment configuration, and deployment group are referenced during a * deployment.
*- * Deployment group: A set of individual + *
+ * Deployment group: A set of individual * instances, CodeDeploy * Lambda deployment configuration settings, or an Amazon ECS * service and network details. A Lambda deployment group specifies how @@ -1100,20 +1100,20 @@ export interface CodeDeploy { * settings.
*- * Deployment configuration: A set of deployment + *
+ * Deployment configuration: A set of deployment * rules and deployment success and failure conditions used by CodeDeploy during a deployment.
*- * Deployment: The process and the components used + *
+ * Deployment: The process and the components used * when updating a Lambda function, a containerized application in an * Amazon ECS service, or of installing content on one or more * instances.
*- * Application revisions: For an Lambda deployment, this is an AppSpec file that specifies the + *
+ * Application revisions: For a Lambda deployment, this is an AppSpec file that specifies the * Lambda function to be updated and one or more functions to * validate deployment lifecycle events. For an Amazon ECS deployment, this * is an AppSpec file that specifies the Amazon ECS task definition, @@ -1125,33 +1125,33 @@ export interface CodeDeploy { * identified by its commit ID.
*This guide also contains information to help you get details about the instances in + *
This guide also contains information to help you get details about the instances in * your deployments, to make on-premises instances available for CodeDeploy * deployments, to get details about a Lambda function deployment, and to get * details about Amazon ECS service deployments.
- *+ *
* CodeDeploy Information Resources *
* CodeDeploy Developer Forum
*
You can deploy a nearly unlimited variety of application content, such as an updated * Lambda function, updated applications in an Amazon ECS service, * code, web and configuration files, executables, packages, scripts, multimedia files, and * so on. CodeDeploy can deploy application content stored in Amazon S3 * buckets, GitHub repositories, or Bitbucket repositories. You do not need to make changes * to your existing code before you can use CodeDeploy.
- *CodeDeploy makes it easier for you to rapidly release new features, helps + *
CodeDeploy makes it easier for you to rapidly release new features, helps * you avoid downtime during application deployment, and handles the complexity of updating * your applications, without many of the risks associated with error-prone manual * deployments.
- *+ *
* CodeDeploy Components - *
- *Use the information in this guide to help you work with the following CodeDeploy components:
- *Use the information in this guide to help you work with the following CodeDeploy components:
+ *- * Application: A name that uniquely identifies + *
+ * Application: A name that uniquely identifies * the application you want to deploy. CodeDeploy uses this name, which * functions as a container, to ensure the correct combination of revision, * deployment configuration, and deployment group are referenced during a * deployment.
*- * Deployment group: A set of individual + *
+ * Deployment group: A set of individual * instances, CodeDeploy * Lambda deployment configuration settings, or an Amazon ECS * service and network details. A Lambda deployment group specifies how @@ -523,20 +523,20 @@ export interface CodeDeployClientResolvedConfig extends CodeDeployClientResolved * settings.
*- * Deployment configuration: A set of deployment + *
+ * Deployment configuration: A set of deployment * rules and deployment success and failure conditions used by CodeDeploy during a deployment.
*- * Deployment: The process and the components used + *
+ * Deployment: The process and the components used * when updating a Lambda function, a containerized application in an * Amazon ECS service, or of installing content on one or more * instances.
*- * Application revisions: For an Lambda deployment, this is an AppSpec file that specifies the + *
+ * Application revisions: For a Lambda deployment, this is an AppSpec file that specifies the * Lambda function to be updated and one or more functions to * validate deployment lifecycle events. For an Amazon ECS deployment, this * is an AppSpec file that specifies the Amazon ECS task definition, @@ -548,33 +548,33 @@ export interface CodeDeployClientResolvedConfig extends CodeDeployClientResolved * identified by its commit ID.
*This guide also contains information to help you get details about the instances in + *
This guide also contains information to help you get details about the instances in * your deployments, to make on-premises instances available for CodeDeploy * deployments, to get details about a Lambda function deployment, and to get * details about Amazon ECS service deployments.
- *+ *
* CodeDeploy Information Resources *
* CodeDeploy Developer Forum
*
* instead.
Returns an array of one or more instances associated with a deployment. This method + * + *
Returns an array of one or more instances associated with a deployment. This method
* works with EC2/On-premises and Lambda compute platforms. The newer
* BatchGetDeploymentTargets
works with all compute platforms. The maximum
* number of instances that can be returned is 25.
BatchGetDeploymentInstances
. The maximum number of targets that can be
* returned is 25.
- * The type of targets returned depends on the deployment's compute platform or + *
The type of targets returned depends on the deployment's compute platform or * deployment method:
- *- * EC2/On-premises: Information about Amazon EC2 instance targets.
+ *+ * EC2/On-premises: Information about Amazon EC2 instance targets.
*- * Lambda: Information about + *
+ * Lambda: Information about * Lambda functions targets.
*- * Amazon ECS: Information about Amazon ECS service targets.
+ *+ * Amazon ECS: Information about Amazon ECS service targets.
*+ *
* CloudFormation: Information about * targets of blue/green deployments initiated by a CloudFormation stack * update.
diff --git a/clients/client-codedeploy/src/commands/CreateDeploymentCommand.ts b/clients/client-codedeploy/src/commands/CreateDeploymentCommand.ts index 8674e8c1a8fa5..1520ce8f3e83b 100644 --- a/clients/client-codedeploy/src/commands/CreateDeploymentCommand.ts +++ b/clients/client-codedeploy/src/commands/CreateDeploymentCommand.ts @@ -153,21 +153,21 @@ export interface CreateDeploymentCommandOutput extends CreateDeploymentOutput, _ * * @throws {@link InvalidAlarmConfigException} (client fault) *The format of the alarm configuration is invalid. Possible causes include:
- *The alarm list is null.
+ *The alarm list is null.
*The alarm object is null.
+ *The alarm object is null.
*The alarm name is empty or null or exceeds the limit of 255 characters.
+ *The alarm name is empty or null or exceeds the limit of 255 characters.
*Two alarms with the same name have been specified.
+ *Two alarms with the same name have been specified.
*The alarm configuration is enabled, but the alarm list is empty.
+ *The alarm configuration is enabled, but the alarm list is empty.
*The target instance configuration is invalid. Possible causes include:
- *Configuration data for target instances was entered for an in-place + *
Configuration data for target instances was entered for an in-place * deployment.
*The limit of 10 tags for a tag type was exceeded.
+ *The limit of 10 tags for a tag type was exceeded.
*The combined length of the tag names exceeded the limit.
+ *The combined length of the tag names exceeded the limit.
*A specified tag is not currently applied to any instances.
+ *A specified tag is not currently applied to any instances.
*The format of the alarm configuration is invalid. Possible causes include:
- *The alarm list is null.
+ *The alarm list is null.
*The alarm object is null.
+ *The alarm object is null.
*The alarm name is empty or null or exceeds the limit of 255 characters.
+ *The alarm name is empty or null or exceeds the limit of 255 characters.
*Two alarms with the same name have been specified.
+ *Two alarms with the same name have been specified.
*The alarm configuration is enabled, but the alarm list is empty.
+ *The alarm configuration is enabled, but the alarm list is empty.
*Deletes a deployment configuration.
- *A deployment configuration cannot be deleted if it is currently in use. Predefined * configurations cannot be deleted.
- *Gets information about a deployment.
- * The content
property of the appSpecContent
object in
* the returned revision is always null. Use GetApplicationRevision
and
* the sha256
property of the returned appSpecContent
object
* to get the content of the deployment’s AppSpec file.
ListDeploymentInstances
throws an
* exception if it is used with a compute platform other than EC2/On-premises or
* Lambda.
- *
- * Lists the instance for a deployment associated with the IAM user or + * + *
Lists the instances for a deployment associated with the IAM user or * Amazon Web Services account.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-codedeploy/src/commands/ListOnPremisesInstancesCommand.ts b/clients/client-codedeploy/src/commands/ListOnPremisesInstancesCommand.ts index 69aeeff97f468..8ca94ceb7ed14 100644 --- a/clients/client-codedeploy/src/commands/ListOnPremisesInstancesCommand.ts +++ b/clients/client-codedeploy/src/commands/ListOnPremisesInstancesCommand.ts @@ -37,7 +37,7 @@ export interface ListOnPremisesInstancesCommandOutput extends ListOnPremisesInst /** * @public *Gets a list of names for one or more on-premises instances.
- *Unless otherwise specified, both registered and deregistered on-premises instance + *
Unless otherwise specified, both registered and deregistered on-premises instance * names are listed. To list only registered or deregistered on-premises instance names, * use the registration status parameter.
* @example diff --git a/clients/client-codedeploy/src/commands/RegisterOnPremisesInstanceCommand.ts b/clients/client-codedeploy/src/commands/RegisterOnPremisesInstanceCommand.ts index 3547a82f43d6a..302b525871adf 100644 --- a/clients/client-codedeploy/src/commands/RegisterOnPremisesInstanceCommand.ts +++ b/clients/client-codedeploy/src/commands/RegisterOnPremisesInstanceCommand.ts @@ -37,9 +37,9 @@ export interface RegisterOnPremisesInstanceCommandOutput extends __MetadataBeare /** * @public *Registers an on-premises instance.
- *Only one IAM ARN (an IAM session ARN or IAM user ARN) is supported in the request. You cannot use both.
- *The format of the alarm configuration is invalid. Possible causes include:
- *The alarm list is null.
+ *The alarm list is null.
*The alarm object is null.
+ *The alarm object is null.
*The alarm name is empty or null or exceeds the limit of 255 characters.
+ *The alarm name is empty or null or exceeds the limit of 255 characters.
*Two alarms with the same name have been specified.
+ *Two alarms with the same name have been specified.
*The alarm configuration is enabled, but the alarm list is empty.
+ *The alarm configuration is enabled, but the alarm list is empty.
*You can deploy a nearly unlimited variety of application content, such as an updated + *
You can deploy a nearly unlimited variety of application content, such as an updated * Lambda function, updated applications in an Amazon ECS service, * code, web and configuration files, executables, packages, scripts, multimedia files, and * so on. CodeDeploy can deploy application content stored in Amazon S3 * buckets, GitHub repositories, or Bitbucket repositories. You do not need to make changes * to your existing code before you can use CodeDeploy.
- *CodeDeploy makes it easier for you to rapidly release new features, helps + *
CodeDeploy makes it easier for you to rapidly release new features, helps * you avoid downtime during application deployment, and handles the complexity of updating * your applications, without many of the risks associated with error-prone manual * deployments.
- *+ *
* CodeDeploy Components - *
- *Use the information in this guide to help you work with the following CodeDeploy components:
- *Use the information in this guide to help you work with the following CodeDeploy components:
+ *- * Application: A name that uniquely identifies + *
+ * Application: A name that uniquely identifies * the application you want to deploy. CodeDeploy uses this name, which * functions as a container, to ensure the correct combination of revision, * deployment configuration, and deployment group are referenced during a * deployment.
*- * Deployment group: A set of individual + *
+ * Deployment group: A set of individual * instances, CodeDeploy * Lambda deployment configuration settings, or an Amazon ECS * service and network details. A Lambda deployment group specifies how @@ -40,20 +40,20 @@ * settings.
*- * Deployment configuration: A set of deployment + *
+ * Deployment configuration: A set of deployment * rules and deployment success and failure conditions used by CodeDeploy during a deployment.
*- * Deployment: The process and the components used + *
+ * Deployment: The process and the components used * when updating a Lambda function, a containerized application in an * Amazon ECS service, or of installing content on one or more * instances.
*- * Application revisions: For an Lambda deployment, this is an AppSpec file that specifies the + *
+ * Application revisions: For a Lambda deployment, this is an AppSpec file that specifies the * Lambda function to be updated and one or more functions to * validate deployment lifecycle events. For an Amazon ECS deployment, this * is an AppSpec file that specifies the Amazon ECS task definition, @@ -65,33 +65,33 @@ * identified by its commit ID.
*This guide also contains information to help you get details about the instances in + *
This guide also contains information to help you get details about the instances in * your deployments, to make on-premises instances available for CodeDeploy * deployments, to get details about a Lambda function deployment, and to get * details about Amazon ECS service deployments.
- *+ *
* CodeDeploy Information Resources *
* CodeDeploy Developer Forum
*
- *Keys and values are both required. Keys cannot be null or empty strings. Value-only + *
Keys and values are both required. Keys cannot be null or empty strings. Value-only * tags are not allowed.
*/ tags: Tag[] | undefined; @@ -210,14 +210,14 @@ export interface AlarmConfiguration { * @public *Indicates whether a deployment should continue if information about the current state * of alarms cannot be retrieved from Amazon CloudWatch. The default value is false.
- *+ *
* true
: The deployment proceeds even if alarm status information
* can't be retrieved from Amazon CloudWatch.
+ *
* false
: The deployment stops if alarm status information can't be
* retrieved from Amazon CloudWatch.
The YAML-formatted or JSON-formatted revision string.
- *For an Lambda deployment, the content includes a Lambda + *
For a Lambda deployment, the content includes a Lambda * function name, the alias for its original version, and the alias for its replacement * version. The deployment shifts traffic from the original version of the Lambda function to the replacement version.
- *For an Amazon ECS deployment, the content includes the task name, information + *
For an Amazon ECS deployment, the content includes the task name, information * about the load balancer that serves traffic to the container, and more.
- *For both types of deployments, the content can specify Lambda functions + *
For both types of deployments, the content can specify Lambda functions
* that run at specified hooks, such as BeforeInstall
, during a deployment.
*
The GitHub account and repository pair that stores a reference to the commit that * represents the bundled artifacts for the application revision.
- *Specified as account/repository.
+ *Specified as account/repository.
*/ repository?: string; @@ -584,17 +584,17 @@ export interface S3Location { /** * @public *The file type of the application revision. Must be one of the following:
- *+ *
* tar
: A tar archive file.
+ *
* tgz
: A compressed tar archive file.
+ *
* zip
: A zip archive file.
A specific version of the Amazon S3 object that represents the bundled * artifacts for the application revision.
- *If the version is not specified, the system uses the most recent version by + *
If the version is not specified, the system uses the most recent version by * default.
*/ version?: string; @@ -614,7 +614,7 @@ export interface S3Location { * @public *The ETag of the Amazon S3 object that represents the bundled artifacts for the * application revision.
- *If the ETag is not specified as an input parameter, ETag validation of the object is + *
If the ETag is not specified as an input parameter, ETag validation of the object is * skipped.
*/ eTag?: string; @@ -652,20 +652,20 @@ export interface RevisionLocation { /** * @public *The type of application revision:
- *S3: An application revision stored in Amazon S3.
+ *S3: An application revision stored in Amazon S3.
*GitHub: An application revision stored in GitHub (EC2/On-premises deployments + *
GitHub: An application revision stored in GitHub (EC2/On-premises deployments * only).
*String: A YAML-formatted or JSON-formatted string (Lambda + *
String: A YAML-formatted or JSON-formatted string (Lambda * deployments only).
*AppSpecContent: An AppSpecContent
object that contains the
+ *
AppSpecContent: An AppSpecContent
object that contains the
* contents of an AppSpec file for a Lambda or Amazon ECS
* deployment. The content is formatted as JSON or YAML stored as a
* RawString.
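A minimal sketch of the AppSpecContent revision type for a Lambda deployment (all names, aliases, and versions are hypothetical):

```ts
import { CodeDeployClient, CreateDeploymentCommand } from "@aws-sdk/client-codedeploy";

// AppSpec for shifting a Lambda alias from version 1 to version 2, stored as a RawString.
const appspec = JSON.stringify({
  version: 0.0,
  Resources: [
    {
      myFunction: {
        Type: "AWS::Lambda::Function",
        Properties: { Name: "my-fn", Alias: "live", CurrentVersion: "1", TargetVersion: "2" },
      },
    },
  ],
});

await new CodeDeployClient({}).send(
  new CreateDeploymentCommand({
    applicationName: "my-lambda-app",       // hypothetical
    deploymentGroupName: "my-lambda-group", // hypothetical
    revision: { revisionType: "AppSpecContent", appSpecContent: { content: appspec } },
  })
);
```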
Information about when to reroute traffic from an original environment to a * replacement environment in a blue/green deployment.
- *CONTINUE_DEPLOYMENT: Register new instances with the load balancer immediately + *
CONTINUE_DEPLOYMENT: Register new instances with the load balancer immediately * after the new application revision is installed on the instances in the * replacement environment.
*STOP_DEPLOYMENT: Do not register new instances with a load balancer unless + *
STOP_DEPLOYMENT: Do not register new instances with a load balancer unless * traffic rerouting is started using ContinueDeployment. If * traffic rerouting is not started before the end of the specified wait period, * the deployment status is changed to Stopped.
@@ -1000,14 +1000,14 @@ export interface GreenFleetProvisioningOption { /** * @public *The method used to add instances to a replacement environment.
- *+ *
* DISCOVER_EXISTING
: Use instances that already exist or will be
* created manually.
+ *
* COPY_AUTO_SCALING_GROUP
: Use settings from a specified Auto Scaling group to define and create instances in a new Auto Scaling
* group.
The action to take on instances in the original environment after a successful * blue/green deployment.
- *+ *
* TERMINATE
: Instances are terminated after a specified wait
* time.
+ *
* KEEP_ALIVE
: Instances are left running after they are
* deregistered from the load balancer and removed from the deployment
* group.
For an Amazon EC2 deployment, the number of minutes to wait after a successful * blue/green deployment before terminating instances from the original environment.
- * - *For an Amazon ECS deployment, the number of minutes before deleting the + *
For an Amazon ECS deployment, the number of minutes before deleting the * original (blue) task set. During an Amazon ECS deployment, CodeDeploy shifts * traffic from the original (blue) task set to a replacement (green) task set.
- * - *The maximum setting is 2880 minutes (2 days).
+ *The maximum setting is 2880 minutes (2 days).
*/ terminationWaitTimeInMinutes?: number; } @@ -1180,17 +1178,17 @@ export interface EC2TagFilter { /** * @public *The tag filter type:
- *+ *
* KEY_ONLY
: Key only.
+ *
* VALUE_ONLY
: Value only.
+ *
* KEY_AND_VALUE
: Key and value.
An array that contains information about the load balancer to use for load balancing * in a deployment. In Elastic Load Balancing, load balancers are used with Classic Load * Balancers.
- *Adding more than one load balancer to the array is not supported.
- *An array that contains information about the target group to use for load balancing in * a deployment. In Elastic Load Balancing, target groups are used with Application Load * Balancers.
- *Adding more than one target group to the array is not supported.
- *The on-premises instance tag filter type:
- *KEY_ONLY: Key only.
+ *KEY_ONLY: Key only.
*VALUE_ONLY: Value only.
+ *VALUE_ONLY: Value only.
*KEY_AND_VALUE: Key and value.
+ *KEY_AND_VALUE: Key and value.
*Indicates what happens when new Amazon EC2 instances are launched * mid-deployment and do not receive the deployed application revision.
- *If this option is set to UPDATE
or is unspecified, CodeDeploy initiates
+ *
If this option is set to UPDATE
or is unspecified, CodeDeploy initiates
* one or more 'auto-update outdated instances' deployments to apply the deployed
* application revision to the new Amazon EC2 instances.
If this option is set to IGNORE
, CodeDeploy does not initiate a
+ *
If this option is set to IGNORE
, CodeDeploy does not initiate a
* deployment to update the new Amazon EC2 instances. This may result in instances
* having different revisions.
The associated error code:
- *Success: The specified script ran.
+ *Success: The specified script ran.
*ScriptMissing: The specified script was not found in the specified + *
ScriptMissing: The specified script was not found in the specified * location.
*ScriptNotExecutable: The specified script is not a recognized executable file + *
ScriptNotExecutable: The specified script is not a recognized executable file * type.
*ScriptTimedOut: The specified script did not finish running in the specified + *
ScriptTimedOut: The specified script did not finish running in the specified * time period.
*ScriptFailed: The specified script failed to run as expected.
+ *ScriptFailed: The specified script failed to run as expected.
*UnknownError: The specified script did not run for an unknown reason.
+ *UnknownError: The specified script did not run for an unknown reason.
*The last portion of the diagnostic log.
- *If available, CodeDeploy returns up to the last 4 KB of the diagnostic + *
If available, CodeDeploy returns up to the last 4 KB of the diagnostic * log.
*/ logTail?: string; @@ -1921,24 +1919,24 @@ export interface LifecycleEvent { /** * @public *The deployment lifecycle event status:
- *Pending: The deployment lifecycle event is pending.
+ *Pending: The deployment lifecycle event is pending.
*InProgress: The deployment lifecycle event is in progress.
+ *InProgress: The deployment lifecycle event is in progress.
*Succeeded: The deployment lifecycle event ran successfully.
+ *Succeeded: The deployment lifecycle event ran successfully.
*Failed: The deployment lifecycle event has failed.
+ *Failed: The deployment lifecycle event has failed.
*Skipped: The deployment lifecycle event has been skipped.
+ *Skipped: The deployment lifecycle event has been skipped.
*Unknown: The deployment lifecycle event is unknown.
+ *Unknown: The deployment lifecycle event is unknown.
*The deployment status for this instance:
- *+ *
* Pending
: The deployment is pending for this instance.
+ *
* In Progress
: The deployment is in progress for this
* instance.
+ *
* Succeeded
: The deployment has succeeded for this instance.
+ *
* Failed
: The deployment has failed for this instance.
+ *
* Skipped
: The deployment has been skipped for this
* instance.
+ *
* Unknown
: The deployment status is unknown for this
* instance.
Information about which environment an instance belongs to in a blue/green * deployment.
- *BLUE: The instance is part of the original environment.
+ *BLUE: The instance is part of the original environment.
*GREEN: The instance is part of the replacement environment.
+ *GREEN: The instance is part of the replacement environment.
*For more information, see Error Codes for CodeDeploy in the CodeDeploy User Guide.
- *The error code:
- *The error code:
+ *APPLICATION_MISSING: The application was missing. This error code is most + *
APPLICATION_MISSING: The application was missing. This error code is most * likely raised if the application is deleted after the deployment is created, but * before it is started.
*DEPLOYMENT_GROUP_MISSING: The deployment group was missing. This error code is + *
DEPLOYMENT_GROUP_MISSING: The deployment group was missing. This error code is * most likely raised if the deployment group is deleted after the deployment is * created, but before it is started.
*HEALTH_CONSTRAINTS: The deployment failed on too many instances to be + *
HEALTH_CONSTRAINTS: The deployment failed on too many instances to be * successfully deployed within the instance health constraints specified.
*HEALTH_CONSTRAINTS_INVALID: The revision cannot be successfully deployed + *
HEALTH_CONSTRAINTS_INVALID: The revision cannot be successfully deployed * within the instance health constraints specified.
*IAM_ROLE_MISSING: The service role cannot be accessed.
+ *IAM_ROLE_MISSING: The service role cannot be accessed.
*IAM_ROLE_PERMISSIONS: The service role does not have the + *
IAM_ROLE_PERMISSIONS: The service role does not have the * correct permissions.
*INTERNAL_ERROR: There was an internal error.
+ *INTERNAL_ERROR: There was an internal error.
*NO_EC2_SUBSCRIPTION: The calling account is not subscribed to Amazon EC2.
+ *NO_EC2_SUBSCRIPTION: The calling account is not subscribed to Amazon EC2.
*NO_INSTANCES: No instances were specified, or no instances can be + *
NO_INSTANCES: No instances were specified, or no instances can be * found.
*OVER_MAX_INSTANCES: The maximum number of instances was exceeded.
+ *OVER_MAX_INSTANCES: The maximum number of instances was exceeded.
*THROTTLED: The operation was throttled because the calling account exceeded + *
THROTTLED: The operation was throttled because the calling account exceeded * the throttling limits of one or more Amazon Web Services services.
*TIMEOUT: The deployment has timed out.
+ *TIMEOUT: The deployment has timed out.
*REVISION_MISSING: The revision ID was missing. This error code is most likely + *
REVISION_MISSING: The revision ID was missing. This error code is most likely * raised if the revision is deleted after the deployment is created, but before it * is started.
*A timestamp that indicates when the deployment was deployed to the deployment * group.
- *In some cases, the reported value of the start time might be later than the complete + *
In some cases, the reported value of the start time might be later than the complete * time. This is due to differences in the clock settings of backend servers that * participate in the deployment process.
*/ @@ -2545,22 +2543,22 @@ export interface DeploymentInfo { /** * @public *The means by which the deployment was created:
- *+ *
* user
: A user created the deployment.
+ *
* autoscaling
: Amazon EC2 Auto Scaling created the deployment.
+ *
* codeDeployRollback
: A rollback process created the
* deployment.
+ *
* CodeDeployAutoUpdate
: An auto-update process created the
* deployment when it detected outdated Amazon EC2 instances.
BeforeBlockTraffic
fails, the deployment continues with
* BlockTraffic
. If AfterBlockTraffic
fails, the deployment
* continues with ApplicationStop
.
- *
- * If false or not specified, then if a lifecycle event fails during a deployment to an + *
If false or not specified, then if a lifecycle event fails during a deployment to an * instance, that deployment fails. If deployment to that instance is part of an overall * deployment and the number of healthy hosts is not less than the minimum number of * healthy hosts, then a deployment to the next instance is attempted.
- * - *During a deployment, the CodeDeploy agent runs the scripts specified for + *
During a deployment, the CodeDeploy agent runs the scripts specified for
* ApplicationStop
, BeforeBlockTraffic
, and
* AfterBlockTraffic
in the AppSpec file from the previous successful
* deployment. (All other scripts are run from the AppSpec file in the current deployment.)
* If one of these scripts contains an error and does not run successfully, the deployment
* can fail.
If the cause of the failure is a script from the last successful deployment that will + *
If the cause of the failure is a script from the last successful deployment that will
* never run successfully, create a new deployment and use
* ignoreApplicationStopFailures
to specify that the
* ApplicationStop
, BeforeBlockTraffic
, and
@@ -2666,19 +2661,19 @@ export interface DeploymentInfo {
*
Information about how CodeDeploy handles files that already exist in a * deployment target location but weren't part of the previous successful * deployment.
- *+ *
* DISALLOW
: The deployment fails. This is also the default behavior
* if no option is specified.
+ *
* OVERWRITE
: The version of the file from the application revision
* currently being deployed replaces the version already on the instance.
+ *
* RETAIN
: The version of the file already on the instance is kept
* and used as part of the new deployment.
The unique IDs of the deployment targets. The compute platform of the deployment * determines the type of the targets and their formats. The maximum number of deployment * target IDs you can specify is 25.
- *For deployments that use the EC2/On-premises compute platform, the target IDs + *
For deployments that use the EC2/On-premises compute platform, the target IDs
* are Amazon EC2 or on-premises instance IDs, and their target type is
* instanceTarget
.
For deployments that use the Lambda compute platform, the + *
For deployments that use the Lambda compute platform, the
* target IDs are the names of Lambda functions, and their target type
* is instanceTarget
.
For deployments that use the Amazon ECS compute platform, the target + *
For deployments that use the Amazon ECS compute platform, the target
* IDs are pairs of Amazon ECS clusters and services specified using the
* format <clustername>:<servicename>
. Their target type
* is ecsTarget
.
For deployments that are deployed with CloudFormation, the target IDs are + *
For deployments that are deployed with CloudFormation, the target IDs are
* CloudFormation stack IDs. Their target type is
* cloudFormationTarget
.
The status of the task set. There are three valid task set statuses:
- *
- * PRIMARY
: Indicates the task set is serving production traffic.
+ *
+ * PRIMARY
: Indicates the task set is serving production traffic.
*
- * ACTIVE
: Indicates the task set is not serving production traffic.
+ *
+ * ACTIVE
: Indicates the task set is not serving production traffic.
*
- * DRAINING
: Indicates the tasks in the task set are being stopped and
+ *
+ * DRAINING
: Indicates the tasks in the task set are being stopped and
* their corresponding targets are being deregistered from their target group.
*
A list of target objects for a deployment. Each target object contains details about * the target, such as its status and lifecycle events. The type of the target objects * depends on the deployment' compute platform.
- *- * EC2/On-premises: Each target object is an + *
+ * EC2/On-premises: Each target object is an * Amazon EC2 or on-premises instance.
*- * Lambda: The target object is a + *
+ * Lambda: The target object is a * specific version of an Lambda function.
*- * Amazon ECS: The target object is an + *
+ * Amazon ECS: The target object is an * Amazon ECS service.
*- * CloudFormation: The target object is + *
+ * CloudFormation: The target object is * a CloudFormation blue/green deployment.
*The name of a deployment configuration associated with the IAM user or * Amazon Web Services account.
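A minimal sketch of fetching those target objects for one deployment (the IDs are placeholders):

```ts
import { CodeDeployClient, BatchGetDeploymentTargetsCommand } from "@aws-sdk/client-codedeploy";

const client = new CodeDeployClient({});

const { deploymentTargets } = await client.send(
  new BatchGetDeploymentTargetsCommand({
    deploymentId: "d-EXAMPLE111",       // hypothetical
    targetIds: ["i-0123456789abcdef0"], // hypothetical; up to 25 IDs
  })
);

for (const t of deploymentTargets ?? []) {
  // deploymentTargetType tells you which member (instanceTarget, lambdaTarget,
  // ecsTarget, cloudFormationTarget) is populated.
  console.log(t.deploymentTargetType, t.instanceTarget?.status ?? t.ecsTarget?.status);
}
```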
- *If not specified, the value configured in the deployment group is used as the default. + *
If not specified, the value configured in the deployment group is used as the default.
* If the deployment group does not have a deployment configuration associated with it,
* CodeDeployDefault
.OneAtATime
is used by default.
DownloadBundle
. If BeforeBlockTraffic
fails, the
* deployment continues with BlockTraffic
. If AfterBlockTraffic
* fails, the deployment continues with ApplicationStop
.
- *
- * If false or not specified, then if a lifecycle event fails during a deployment to an + *
If false or not specified, then if a lifecycle event fails during a deployment to an * instance, that deployment fails. If deployment to that instance is part of an overall * deployment and the number of healthy hosts is not less than the minimum number of * healthy hosts, then a deployment to the next instance is attempted.
- * - *During a deployment, the CodeDeploy agent runs the scripts specified for + *
During a deployment, the CodeDeploy agent runs the scripts specified for
* ApplicationStop
, BeforeBlockTraffic
, and
* AfterBlockTraffic
in the AppSpec file from the previous successful
* deployment. (All other scripts are run from the AppSpec file in the current deployment.)
* If one of these scripts contains an error and does not run successfully, the deployment
* can fail.
If the cause of the failure is a script from the last successful deployment that will + *
If the cause of the failure is a script from the last successful deployment that will
* never run successfully, create a new deployment and use
* ignoreApplicationStopFailures
to specify that the
* ApplicationStop
, BeforeBlockTraffic
, and
@@ -3741,19 +3733,19 @@ export interface CreateDeploymentInput {
*
Information about how CodeDeploy handles files that already exist in a * deployment target location but weren't part of the previous successful * deployment.
- *The fileExistsBehavior
parameter takes any of the following
+ *
The fileExistsBehavior
parameter takes any of the following
* values:
DISALLOW: The deployment fails. This is also the default behavior if no option + *
DISALLOW: The deployment fails. This is also the default behavior if no option * is specified.
*OVERWRITE: The version of the file from the application revision currently + *
OVERWRITE: The version of the file from the application revision currently * being deployed replaces the version already on the instance.
*RETAIN: The version of the file already on the instance is kept and used as + *
RETAIN: The version of the file already on the instance is kept and used as * part of the new deployment.
*If you specify an overrideAlarmConfiguration
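A minimal sketch passing one of these values on CreateDeployment (the names and bucket are placeholders):

```ts
import { CodeDeployClient, CreateDeploymentCommand } from "@aws-sdk/client-codedeploy";

const { deploymentId } = await new CodeDeployClient({}).send(
  new CreateDeploymentCommand({
    applicationName: "my-app",       // hypothetical
    deploymentGroupName: "my-fleet", // hypothetical
    revision: {
      revisionType: "S3",
      s3Location: { bucket: "my-bucket", key: "app.zip", bundleType: "zip" }, // hypothetical
    },
    fileExistsBehavior: "RETAIN", // keep files already on the instance
  })
);
console.log(deploymentId);
```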
, you need the
* UpdateDeploymentGroup
IAM permission when calling
* CreateDeployment
.
The format of the alarm configuration is invalid. Possible causes include:
- *The alarm list is null.
+ *The alarm list is null.
*The alarm object is null.
+ *The alarm object is null.
*The alarm name is empty or null or exceeds the limit of 255 characters.
+ *The alarm name is empty or null or exceeds the limit of 255 characters.
*Two alarms with the same name have been specified.
+ *Two alarms with the same name have been specified.
*The alarm configuration is enabled, but the alarm list is empty.
+ *The alarm configuration is enabled, but the alarm list is empty.
*The target instance configuration is invalid. Possible causes include:
- *Configuration data for target instances was entered for an in-place + *
Configuration data for target instances was entered for an in-place * deployment.
*The limit of 10 tags for a tag type was exceeded.
+ *The limit of 10 tags for a tag type was exceeded.
*The combined length of the tag names exceeded the limit.
+ *The combined length of the tag names exceeded the limit.
*A specified tag is not currently applied to any instances.
+ *A specified tag is not currently applied to any instances.
*The minimum healthy instance type:
- *+ *
* HOST_COUNT
: The minimum number of healthy instances as an
* absolute value.
+ *
* FLEET_PERCENT
: The minimum number of healthy instances as a
* percentage of the total number of instances in the deployment.
In an example of nine instances, if a HOST_COUNT of six is specified, deploy to up to + *
In an example of nine instances, if a HOST_COUNT of six is specified, deploy to up to * three instances at a time. The deployment is successful if six or more instances are * deployed to successfully. Otherwise, the deployment fails. If a FLEET_PERCENT of 40 is * specified, deploy to up to five instances at a time. The deployment is successful if * four or more instances are deployed to successfully. Otherwise, the deployment * fails.
- *In a call to the GetDeploymentConfig
, CodeDeployDefault.OneAtATime
* returns a minimum healthy instance type of MOST_CONCURRENCY and a value of 1. This
* means a deployment to only one instance at a time. (You cannot set the type to
@@ -4228,8 +4220,8 @@ export interface MinimumHealthyHosts {
* allows one instance at a time to be taken offline for a new deployment, it also
* means that if the deployment to the last instance fails, the overall deployment is
* still successful.
For more information, see CodeDeploy + *
For more information, see CodeDeploy * Instance Health in the CodeDeploy User * Guide.
*/ @@ -4352,21 +4344,21 @@ export interface CreateDeploymentConfigInput { * @public *The minimum number of healthy instances that should be available at any time during * the deployment. There are two parameters expected in the input: type and value.
- *The type parameter takes either of the following values:
- *The type parameter takes either of the following values:
+ *HOST_COUNT: The value parameter represents the minimum number of healthy + *
HOST_COUNT: The value parameter represents the minimum number of healthy * instances as an absolute value.
*FLEET_PERCENT: The value parameter represents the minimum number of healthy + *
FLEET_PERCENT: The value parameter represents the minimum number of healthy * instances as a percentage of the total number of instances in the deployment. If * you specify FLEET_PERCENT, at the start of the deployment, CodeDeploy converts the percentage to the equivalent number of instances and rounds up * fractional instances.
*The value parameter takes an integer.
- *For example, to set a minimum of 95% healthy instance, specify a type of FLEET_PERCENT + *
The value parameter takes an integer.
+ *For example, to set a minimum of 95% healthy instances, specify a type of FLEET_PERCENT * and a value of 95.
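A minimal sketch of that 95% FLEET_PERCENT example as a custom deployment configuration (the configuration name is a placeholder):

```ts
import { CodeDeployClient, CreateDeploymentConfigCommand } from "@aws-sdk/client-codedeploy";

await new CodeDeployClient({}).send(
  new CreateDeploymentConfigCommand({
    deploymentConfigName: "Custom.FleetPercent95", // hypothetical
    minimumHealthyHosts: { type: "FLEET_PERCENT", value: 95 },
    computePlatform: "Server",
  })
);
```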
*/ minimumHealthyHosts?: MinimumHealthyHosts; @@ -4500,11 +4492,11 @@ export interface CreateDeploymentGroupInput { *If specified, the deployment configuration name can be either one of the predefined * configurations provided with CodeDeploy or a custom deployment configuration * that you create by calling the create deployment configuration operation.
- *+ *
* CodeDeployDefault.OneAtATime
is the default deployment configuration. It
* is used if a configuration isn't specified for the deployment or deployment
* group.
For more information about the predefined deployment configurations in CodeDeploy, see Working with
+ * For more information about the predefined deployment configurations in CodeDeploy, see Working with
* Deployment Configurations in CodeDeploy in the CodeDeploy User Guide. Indicates what happens when new Amazon EC2 instances are launched
* mid-deployment and do not receive the deployed application revision. If this option is set to If this option is set to If this option is set to If this option is set to The column name to use to sort the list results:
+ *
*
+ *
*
+ *
* If not specified or set to null, the results are returned in an arbitrary order.
+ * If not specified or set to null, the results are returned in an arbitrary order.
* The order in which to sort the list results:
+ *
*
+ *
* If not specified, the results are sorted in ascending order. If set to null, the results are sorted in an arbitrary order. If not specified, the results are sorted in ascending order. If set to null, the results are sorted in an arbitrary order. An Amazon S3 bucket name to limit the search for revisions. If set to null, all of the user's buckets are searched. If set to null, all of the user's buckets are searched. Whether to list revisions based on whether the revision is the target revision of a
* deployment group:
+ *
*
+ *
*
+ *
* A subset of instances to list by status:
+ *
*
+ *
*
+ *
*
+ *
*
+ *
*
+ *
* The start time of the time range. Specify null to leave the start time open-ended. The end time of the time range. Specify null to leave the end time open-ended. The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account. If The name of a deployment group for the specified application. If A subset of deployments to list by status:
+ *
*
+ *
*
+ *
*
+ *
*
+ *
*
+ *
* A key used to filter the returned targets. The two valid values are:
- *
+ *
- *
+ * The registration status of the on-premises instances:
+ *
*
+ *
* The status of the stop deployment operation: Pending: The stop operation is pending. Pending: The stop operation is pending. Succeeded: The stop operation was successful. Succeeded: The stop operation was successful. The replacement list of Auto Scaling groups to be included in the deployment
* group, if you want to change them. To keep the Auto Scaling groups, enter their names or do not specify this
+ * To keep the Auto Scaling groups, enter their names or do not specify this
* parameter. To remove Auto Scaling groups, specify a non-null empty list of Auto Scaling group names to detach all CodeDeploy-managed Auto Scaling lifecycle hooks. For examples, see Amazon EC2 instances in an Amazon EC2 Auto Scaling group fail to
+ * To remove Auto Scaling groups, specify a non-null empty list of Auto Scaling group names to detach all CodeDeploy-managed Auto Scaling lifecycle hooks. For examples, see Amazon EC2 instances in an Amazon EC2 Auto Scaling group fail to
* launch and receive the error "Heartbeat Timeout" in the
* CodeDeploy User Guide. Indicates what happens when new Amazon EC2 instances are launched
* mid-deployment and do not receive the deployed application revision. If this option is set to If this option is set to If this option is set to If this option is set to UPDATE
or is unspecified, CodeDeploy initiates
+ * UPDATE
or is unspecified, CodeDeploy initiates
* one or more 'auto-update outdated instances' deployments to apply the deployed
* application revision to the new Amazon EC2 instances.IGNORE
, CodeDeploy does not initiate a
+ * IGNORE
, CodeDeploy does not initiate a
* deployment to update the new Amazon EC2 instances. This may result in instances
* having different revisions.
+ *
*
- * registerTime
: Sort by the time the revisions were registered with
* CodeDeploy.firstUsedTime
: Sort by the time the revisions were first used in
* a deployment.lastUsedTime
: Sort by the time the revisions were last used in a
* deployment.
+ *
*
- * ascending
: ascending order.descending
: descending order.
+ *
*
@@ -5980,31 +5972,31 @@ export interface ListDeploymentInstancesInput {
/**
* @public
* include
: List revisions that are target revisions of a deployment
* group.exclude
: Do not list revisions that are target revisions of a
* deployment group.ignore
: List all revisions.
+ *
*
Pending
: Include those instances with pending deployments.InProgress
: Include those instances where deployments are still
* in progress.Succeeded
: Include those instances with successful
* deployments.Failed
: Include those instances with failed deployments.Skipped
: Include those instances with skipped deployments.Unknown
: Include those instances with deployments in an unknown
* state.applicationName
is specified, then
* deploymentGroupName
must be specified. If it is not specified, then
* deploymentGroupName
must not be specified. deploymentGroupName
is specified, then
* applicationName
must be specified. If it is not specified, then
* applicationName
must not be specified.
+ *
*
Created
: Include created deployments in the resulting
* list.Queued
: Include queued deployments in the resulting list.In Progress
: Include in-progress deployments in the resulting
* list.Succeeded
: Include successful deployments in the resulting
* list.Failed
: Include failed deployments in the resulting list.Stopped
: Include stopped deployments in the resulting
* list.
+ *
*
@@ -6375,14 +6367,14 @@ export interface ListOnPremisesInstancesInput {
/**
* @public
* TargetStatus
- A TargetStatus
filter string can be
+ * TargetStatus
- A TargetStatus
filter string can be
* Failed
, InProgress
, Pending
,
* Ready
, Skipped
, Succeeded
, or
* Unknown
. ServerInstanceLabel
- A ServerInstanceLabel
filter
+ * ServerInstanceLabel
- A ServerInstanceLabel
filter
* string can be Blue
or Green
.
+ *
*
Deregistered
: Include deregistered on-premises instances in the
* resulting list.Registered
: Include registered on-premises instances in the
* resulting list.
+ *
*
*/
@@ -7029,13 +7021,13 @@ export interface UpdateDeploymentGroupInput {
* @public
*
+ *
*
UPDATE
or is unspecified, CodeDeploy initiates
+ * UPDATE
or is unspecified, CodeDeploy initiates
* one or more 'auto-update outdated instances' deployments to apply the deployed
* application revision to the new Amazon EC2 instances.IGNORE
, CodeDeploy does not initiate a
+ * IGNORE
, CodeDeploy does not initiate a
* deployment to update the new Amazon EC2 instances. This may result in instances
* having different revisions.
By proactively detecting and providing recommendations for addressing code defects and implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of your code base during the code review stage. For more information about CodeGuru Reviewer, see the Amazon CodeGuru Reviewer User Guide.
-To improve the security of your CodeGuru Reviewer API calls, you can establish a private connection between your VPC and CodeGuru Reviewer by creating an interface VPC endpoint. For more information, see CodeGuru Reviewer and interface VPC diff --git a/clients/client-codeguru-reviewer/src/CodeGuruReviewer.ts b/clients/client-codeguru-reviewer/src/CodeGuruReviewer.ts index 5078d2e0ee6a0..3b339902b8b95 100644 --- a/clients/client-codeguru-reviewer/src/CodeGuruReviewer.ts +++ b/clients/client-codeguru-reviewer/src/CodeGuruReviewer.ts @@ -318,14 +318,12 @@ export interface CodeGuruReviewer { * service that uses program analysis and machine learning to detect potential defects that * are difficult for developers to find and recommends fixes in your Java and Python * code.
- * *By proactively detecting and providing recommendations for addressing code defects and * implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of * your code base during the code review stage. For more information about CodeGuru Reviewer, see the * * Amazon CodeGuru Reviewer User Guide. *
- * *To improve the security of your CodeGuru Reviewer API calls, you can establish a private connection * between your VPC and CodeGuru Reviewer by creating an interface VPC endpoint. For * more information, see CodeGuru Reviewer and interface VPC diff --git a/clients/client-codeguru-reviewer/src/CodeGuruReviewerClient.ts b/clients/client-codeguru-reviewer/src/CodeGuruReviewerClient.ts index a8b921b133c11..4a923adeb31d1 100644 --- a/clients/client-codeguru-reviewer/src/CodeGuruReviewerClient.ts +++ b/clients/client-codeguru-reviewer/src/CodeGuruReviewerClient.ts @@ -314,14 +314,12 @@ export interface CodeGuruReviewerClientResolvedConfig extends CodeGuruReviewerCl * service that uses program analysis and machine learning to detect potential defects that * are difficult for developers to find and recommends fixes in your Java and Python * code.
- * *By proactively detecting and providing recommendations for addressing code defects and * implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of * your code base during the code review stage. For more information about CodeGuru Reviewer, see the * * Amazon CodeGuru Reviewer User Guide. *
- * *To improve the security of your CodeGuru Reviewer API calls, you can establish a private connection * between your VPC and CodeGuru Reviewer by creating an interface VPC endpoint. For * more information, see CodeGuru Reviewer and interface VPC diff --git a/clients/client-codeguru-reviewer/src/commands/AssociateRepositoryCommand.ts b/clients/client-codeguru-reviewer/src/commands/AssociateRepositoryCommand.ts index 50815e6d695c0..a4b848418731d 100644 --- a/clients/client-codeguru-reviewer/src/commands/AssociateRepositoryCommand.ts +++ b/clients/client-codeguru-reviewer/src/commands/AssociateRepositoryCommand.ts @@ -43,15 +43,12 @@ export interface AssociateRepositoryCommandOutput extends AssociateRepositoryRes * information, see Recommendations in * Amazon CodeGuru Reviewer in the Amazon CodeGuru Reviewer User Guide. *
- * *If you associate a CodeCommit or S3 repository, it must be in the same Amazon Web Services Region and * Amazon Web Services account where its CodeGuru Reviewer code reviews are configured.
- * *Bitbucket and GitHub Enterprise Server repositories are managed by Amazon Web Services CodeStar * Connections to connect to CodeGuru Reviewer. For more information, see Associate a * repository in the Amazon CodeGuru Reviewer User Guide. *
- * *You cannot use the CodeGuru Reviewer SDK or the Amazon Web Services CLI to associate a GitHub repository with * Amazon CodeGuru Reviewer. To associate a GitHub repository, use the console. For more information, see diff --git a/clients/client-codeguru-reviewer/src/endpoint/ruleset.ts b/clients/client-codeguru-reviewer/src/endpoint/ruleset.ts index e6c16c908d417..35aabd929f53a 100644 --- a/clients/client-codeguru-reviewer/src/endpoint/ruleset.ts +++ b/clients/client-codeguru-reviewer/src/endpoint/ruleset.ts @@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/codeguru-reviewer.json */ -const p="required", -q="fn", -r="argv", -s="ref"; -const a="PartitionResult", +const q="required", +r="fn", +s="argv", +t="ref"; +const a="isSet", b="tree", c="error", d="endpoint", -e={[p]:false,"type":"String"}, -f={[p]:true,"default":false,"type":"Boolean"}, -g={[s]:"Endpoint"}, -h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]}, -i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]}, -j={}, -k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]}, -l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]}, -m=[g], -n=[h], -o=[i]; -const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://codeguru-reviewer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://codeguru-reviewer-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://codeguru-reviewer.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://codeguru-reviewer.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]}; +e="PartitionResult", +f={[q]:false,"type":"String"}, +g={[q]:true,"default":false,"type":"Boolean"}, +h={[t]:"Endpoint"}, +i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]}, +j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]}, +k={}, +l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]}, +m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]}, +n=[i], +o=[j], +p=[{[t]:"Region"}]; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not 
supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://codeguru-reviewer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://codeguru-reviewer-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://codeguru-reviewer.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://codeguru-reviewer.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-codeguru-reviewer/src/index.ts b/clients/client-codeguru-reviewer/src/index.ts index 54e35840511c8..1fb4a9a12e0b5 100644 --- a/clients/client-codeguru-reviewer/src/index.ts +++ b/clients/client-codeguru-reviewer/src/index.ts @@ -5,14 +5,12 @@ * service that uses program analysis and machine learning to detect potential defects that * are difficult for developers to find and recommends fixes in your Java and Python * code.
- * *By proactively detecting and providing recommendations for addressing code defects and * implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of * your code base during the code review stage. For more information about CodeGuru Reviewer, see the * * Amazon CodeGuru Reviewer User Guide. *
- * *To improve the security of your CodeGuru Reviewer API calls, you can establish a private connection
* between your VPC and CodeGuru Reviewer by creating an interface VPC endpoint. For
* more information, see CodeGuru Reviewer and interface VPC
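As a sketch of the association flow for a provider the SDK does support, assuming a CodeCommit repository in the same account and Region (the repository name is a placeholder; GitHub repositories must be associated through the console, as noted above):

import {
  CodeGuruReviewerClient,
  AssociateRepositoryCommand,
} from "@aws-sdk/client-codeguru-reviewer";

const client = new CodeGuruReviewerClient({ region: "us-east-1" });

// Associate a CodeCommit repository so CodeGuru Reviewer can review its pull requests.
const { RepositoryAssociation } = await client.send(
  new AssociateRepositoryCommand({
    Repository: { CodeCommit: { Name: "my-repo" } }, // placeholder name
  })
);
console.log(RepositoryAssociation?.AssociationArn);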
diff --git a/clients/client-codeguru-reviewer/src/models/models_0.ts b/clients/client-codeguru-reviewer/src/models/models_0.ts
index 7324c29077d8f..86aed372dce9a 100644
--- a/clients/client-codeguru-reviewer/src/models/models_0.ts
+++ b/clients/client-codeguru-reviewer/src/models/models_0.ts
@@ -196,18 +196,18 @@ export interface AssociateRepositoryRequest {
/**
* @public
 * An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:
- * A tag key (for example, CostCenter,
+ * A tag key (for example, CostCenter,
 * Environment, Project, or Secret). Tag
 * keys are case sensitive.
- * An optional field known as a tag value (for example,
+ * An optional field known as a tag value (for example,
 * 111122223333, Production, or a team name).
 * Omitting the tag value is the same as using an empty string. Like tag keys, tag
 * values are case sensitive.
An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:
- * A tag key (for example, CostCenter,
+ * A tag key (for example, CostCenter,
 * Environment, Project, or Secret). Tag
 * keys are case sensitive.
- * An optional field known as a tag value (for example,
+ * An optional field known as a tag value (for example,
 * 111122223333, Production, or a team name).
 * Omitting the tag value is the same as using an empty string. Like tag keys, tag
 * values are case sensitive.
An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:
- * A tag key (for example, CostCenter,
+ * A tag key (for example, CostCenter,
 * Environment, Project, or Secret). Tag
 * keys are case sensitive.
- * An optional field known as a tag value (for example,
+ * An optional field known as a tag value (for example,
 * 111122223333, Production, or a team name).
 * Omitting the tag value is the same as using an empty string. Like tag keys, tag
 * values are case sensitive.
An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:
- * A tag key (for example, CostCenter,
+ * A tag key (for example, CostCenter,
 * Environment, Project, or Secret). Tag
 * keys are case sensitive.
- * An optional field known as a tag value (for example,
+ * An optional field known as a tag value (for example,
 * 111122223333, Production, or a team name).
 * Omitting the tag value is the same as using an empty string. Like tag keys, tag
 * values are case sensitive.
An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:
- * A tag key (for example, CostCenter,
+ * A tag key (for example, CostCenter,
 * Environment, Project, or Secret). Tag
 * keys are case sensitive.
- * An optional field known as a tag value (for example,
+ * An optional field known as a tag value (for example,
 * 111122223333, Production, or a team name).
 * Omitting the tag value is the same as using an empty string. Like tag keys, tag
 * values are case sensitive.
An array of key-value pairs used to tag an associated repository. A tag is a custom attribute label with two parts:
- * A tag key (for example, CostCenter,
+ * A tag key (for example, CostCenter,
 * Environment, Project, or Secret). Tag
 * keys are case sensitive.
- * An optional field known as a tag value (for example,
+ * An optional field known as a tag value (for example,
 * 111122223333, Production, or a team name).
 * Omitting the tag value is the same as using an empty string. Like tag keys, tag
 * values are case sensitive.
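A sketch of attaching such two-part tags to an existing association with this client's TagResource operation (the association ARN is a placeholder):

import {
  CodeGuruReviewerClient,
  TagResourceCommand,
} from "@aws-sdk/client-codeguru-reviewer";

const client = new CodeGuruReviewerClient({ region: "us-east-1" });

// Tag keys and values are case sensitive; an empty string value is allowed.
await client.send(
  new TagResourceCommand({
    resourceArn:
      "arn:aws:codeguru-reviewer:us-east-1:111122223333:association:example", // placeholder ARN
    Tags: { CostCenter: "111122223333", Environment: "Production" },
  })
);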
This AWS CodeStar Notifications API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Notifications API. You can use the AWS CodeStar Notifications API to work with the following objects:
-Notification rules, by calling the following:
Targets, by calling the following:
Events, by calling the following:
For information about how to use AWS CodeStar Notifications, see the Amazon Web Services Developer Tools Console User Guide.
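For instance, a hedged sketch that wires a notification rule to an SNS topic (the rule name, resource ARN, event type ID, and topic ARN are all illustrative):

import {
  CodestarNotificationsClient,
  CreateNotificationRuleCommand,
} from "@aws-sdk/client-codestar-notifications";

const client = new CodestarNotificationsClient({ region: "us-east-1" });

await client.send(
  new CreateNotificationRuleCommand({
    Name: "build-failures", // placeholder
    Resource: "arn:aws:codebuild:us-east-1:111122223333:project/my-project", // placeholder
    DetailType: "BASIC",
    EventTypeIds: ["codebuild-project-build-state-failed"], // placeholder event type
    Targets: [
      {
        TargetType: "SNS",
        TargetAddress: "arn:aws:sns:us-east-1:111122223333:my-topic", // placeholder
      },
    ],
  })
);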
diff --git a/clients/client-codestar-notifications/src/CodestarNotifications.ts b/clients/client-codestar-notifications/src/CodestarNotifications.ts index a7b9b670b128b..e78ef3e7bf5f6 100644 --- a/clients/client-codestar-notifications/src/CodestarNotifications.ts +++ b/clients/client-codestar-notifications/src/CodestarNotifications.ts @@ -258,7 +258,6 @@ export interface CodestarNotifications { *This AWS CodeStar Notifications API Reference provides descriptions and usage examples of the * operations and data types for the AWS CodeStar Notifications API. You can use the AWS CodeStar Notifications API * to work with the following objects:
- * *Notification rules, by calling the following:
*Targets, by calling the following:
*Events, by calling the following:
*For information about how to use AWS CodeStar Notifications, see the Amazon Web Services Developer Tools Console User Guide. + *
For information about how to use AWS CodeStar Notifications, see the Amazon Web Services Developer Tools Console User Guide. *
*/ export class CodestarNotifications extends CodestarNotificationsClient implements CodestarNotifications {} diff --git a/clients/client-codestar-notifications/src/CodestarNotificationsClient.ts b/clients/client-codestar-notifications/src/CodestarNotificationsClient.ts index fe68910bc84a7..43cce9b0fc005 100644 --- a/clients/client-codestar-notifications/src/CodestarNotificationsClient.ts +++ b/clients/client-codestar-notifications/src/CodestarNotificationsClient.ts @@ -301,7 +301,6 @@ export interface CodestarNotificationsClientResolvedConfig extends CodestarNotif *This AWS CodeStar Notifications API Reference provides descriptions and usage examples of the * operations and data types for the AWS CodeStar Notifications API. You can use the AWS CodeStar Notifications API * to work with the following objects:
- * *Notification rules, by calling the following:
*Targets, by calling the following:
*Events, by calling the following:
*For information about how to use AWS CodeStar Notifications, see the Amazon Web Services Developer Tools Console User Guide. + *
For information about how to use AWS CodeStar Notifications, see the Amazon Web Services Developer Tools Console User Guide. *
*/ export class CodestarNotificationsClient extends __Client< diff --git a/clients/client-codestar-notifications/src/endpoint/ruleset.ts b/clients/client-codestar-notifications/src/endpoint/ruleset.ts index a4bf38727d85e..1334e005889b2 100644 --- a/clients/client-codestar-notifications/src/endpoint/ruleset.ts +++ b/clients/client-codestar-notifications/src/endpoint/ruleset.ts @@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/codestar-notifications.json */ -const p="required", -q="fn", -r="argv", -s="ref"; -const a="PartitionResult", +const q="required", +r="fn", +s="argv", +t="ref"; +const a="isSet", b="tree", c="error", d="endpoint", -e={[p]:false,"type":"String"}, -f={[p]:true,"default":false,"type":"Boolean"}, -g={[s]:"Endpoint"}, -h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]}, -i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]}, -j={}, -k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]}, -l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]}, -m=[g], -n=[h], -o=[i]; -const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://codestar-notifications-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{endpoint:{url:"https://codestar-notifications-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://codestar-notifications.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://codestar-notifications.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]}; +e="PartitionResult", +f={[q]:false,"type":"String"}, +g={[q]:true,"default":false,"type":"Boolean"}, +h={[t]:"Endpoint"}, +i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]}, +j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]}, +k={}, +l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]}, +m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]}, +n=[i], +o=[j], +p=[{[t]:"Region"}]; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not 
supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://codestar-notifications-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://codestar-notifications-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://codestar-notifications.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://codestar-notifications.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-codestar-notifications/src/index.ts b/clients/client-codestar-notifications/src/index.ts index 2685f7caf8e54..176c6e89b5c95 100644 --- a/clients/client-codestar-notifications/src/index.ts +++ b/clients/client-codestar-notifications/src/index.ts @@ -4,7 +4,6 @@ *This AWS CodeStar Notifications API Reference provides descriptions and usage examples of the * operations and data types for the AWS CodeStar Notifications API. You can use the AWS CodeStar Notifications API * to work with the following objects:
- * *Notification rules, by calling the following:
*Targets, by calling the following:
*Events, by calling the following:
*For information about how to use AWS CodeStar Notifications, see the Amazon Web Services Developer Tools Console User Guide. + *
For information about how to use AWS CodeStar Notifications, see the Amazon Web Services Developer Tools Console User Guide. *
* * @packageDocumentation diff --git a/clients/client-codestar-notifications/src/models/models_0.ts b/clients/client-codestar-notifications/src/models/models_0.ts index 179de114deeed..29eaca1e9836e 100644 --- a/clients/client-codestar-notifications/src/models/models_0.ts +++ b/clients/client-codestar-notifications/src/models/models_0.ts @@ -170,10 +170,10 @@ export interface CreateNotificationRuleRequest { * the request cannot be repeated with a changed parameter. If a request with the same * parameters is received and a token is included, the request returns information about * the initial request that used that token. - *The Amazon Web Services SDKs prepopulate client request tokens. If you are using an Amazon Web Services SDK, an * idempotency token is created for you.
- *Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the + *
Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the * familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR * provides a secure, scalable, and reliable registry for your Docker or Open Container * Initiative (OCI) images. Amazon ECR supports private repositories with resource-based * permissions using IAM so that specific users or Amazon EC2 instances can access * repositories and images.
- *Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the + *
Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the * Amazon Web Services General Reference.
*/ export class ECR extends ECRClient implements ECR {} diff --git a/clients/client-ecr/src/ECRClient.ts b/clients/client-ecr/src/ECRClient.ts index 81dc78a5b55c5..9d561b7aa64ba 100644 --- a/clients/client-ecr/src/ECRClient.ts +++ b/clients/client-ecr/src/ECRClient.ts @@ -440,13 +440,13 @@ export interface ECRClientResolvedConfig extends ECRClientResolvedConfigType {} /** * @public *Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the + *
Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the * familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR * provides a secure, scalable, and reliable registry for your Docker or Open Container * Initiative (OCI) images. Amazon ECR supports private repositories with resource-based * permissions using IAM so that specific users or Amazon EC2 instances can access * repositories and images.
- *Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the + *
Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the * Amazon Web Services General Reference.
*/ export class ECRClient extends __Client< diff --git a/clients/client-ecr/src/commands/BatchCheckLayerAvailabilityCommand.ts b/clients/client-ecr/src/commands/BatchCheckLayerAvailabilityCommand.ts index 48c05ad210e73..a3154c4d70607 100644 --- a/clients/client-ecr/src/commands/BatchCheckLayerAvailabilityCommand.ts +++ b/clients/client-ecr/src/commands/BatchCheckLayerAvailabilityCommand.ts @@ -39,10 +39,10 @@ export interface BatchCheckLayerAvailabilityCommandOutput /** * @public *Checks the availability of one or more image layers in a repository.
- *When an image is pushed to a repository, each image layer is checked to verify if it + *
When an image is pushed to a repository, each image layer is checked to verify if it * has been uploaded before. If it has been uploaded, then the image layer is * skipped.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
Deletes a list of specified images within a repository. Images are specified with
* either an imageTag
or imageDigest
.
- * You can remove a tag from an image by specifying the image's tag in your request. When
+ * You can remove a tag from an image by specifying the image's tag in your request. When
 * you remove the last tag from an image, the image is deleted from your repository.
You can completely delete an image (and all of its tags) by specifying the image's * digest in your request.
* @example * Use a bare-bones client and the command you need to make an API call. @@ -73,7 +73,7 @@ export interface BatchDeleteImageCommandOutput extends BatchDeleteImageResponse, * // imageDigest: "STRING_VALUE", * // imageTag: "STRING_VALUE", * // }, - * // failureCode: "STRING_VALUE", + * // failureCode: "InvalidImageDigest" || "InvalidImageTag" || "ImageTagDoesNotMatchDigest" || "ImageNotFound" || "MissingDigestAndTag" || "ImageReferencedByManifestList" || "KmsError", * // failureReason: "STRING_VALUE", * // }, * // ], diff --git a/clients/client-ecr/src/commands/BatchGetImageCommand.ts b/clients/client-ecr/src/commands/BatchGetImageCommand.ts index a5ad85df40949..0bdf89b3c609f 100644 --- a/clients/client-ecr/src/commands/BatchGetImageCommand.ts +++ b/clients/client-ecr/src/commands/BatchGetImageCommand.ts @@ -38,7 +38,7 @@ export interface BatchGetImageCommandOutput extends BatchGetImageResponse, __Met * @public *Gets detailed information for an image. Images are specified with either an
* imageTag
or imageDigest
.
- * When an image is pulled, the BatchGetImage API is called once to retrieve the image
+ * When an image is pulled, the BatchGetImage API is called once to retrieve the image
 * manifest.
* repository name, and upload ID. You can optionally provide a sha256
digest
* of the image layer for data validation purposes.
- * When an image is pushed, the CompleteLayerUpload API is called once per each new image
+ * When an image is pushed, the CompleteLayerUpload API is called once per each new image
 * layer to verify that the upload has completed.
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
Returns metadata about the images in a repository.
- *Beginning with Docker version 1.9, the Docker client compresses image layers
* before pushing them to a V2 Docker registry. The output of the docker
* images
command shows the uncompressed image size, so it may return a
* larger image size than the image sizes returned by DescribeImages.
Retrieves an authorization token. An authorization token represents your IAM * authentication credentials and can be used to access any Amazon ECR registry that your IAM * principal has access to. The authorization token is valid for 12 hours.
- *The authorizationToken
returned is a base64 encoded string that can be
+ *
The Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can
* only get URLs for image layers that are referenced in an image. When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer
+ * When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer
* that is not already cached. This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the Notifies Amazon ECR that you intend to upload an image layer. When an image is pushed, the InitiateLayerUpload API is called once per image layer
+ * When an image is pushed, the InitiateLayerUpload API is called once per image layer
* that has not already been uploaded. Whether or not an image layer has been uploaded is
* determined by the BatchCheckLayerAvailability API action. This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the Lists all the image IDs for the specified repository. You can filter images based on whether or not they are tagged by using the
+ * You can filter images based on whether or not they are tagged by using the
* Creates or updates the image manifest and tags associated with an image. When an image is pushed and all new image layers have been uploaded, the PutImage API
+ * When an image is pushed and all new image layers have been uploaded, the PutImage API
* is called once to create or update the image manifest and the tags associated with the
* image. This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the The Updates the image scanning configuration for the specified repository. Updates the image scanning configuration for the specified repository. Creates or updates the permissions policy for your registry. A registry policy is used to specify permissions for another Amazon Web Services account and is used
+ * A registry policy is used to specify permissions for another Amazon Web Services account and is used
* when configuring cross-account replication. For more information, see Registry permissions in the Amazon Elastic Container Registry User Guide.authorizationToken
returned is a base64 encoded string that can be
* decoded and used in a docker login
command to authenticate to a registry.
* The CLI offers a get-login-password
command that simplifies the login
* process. For more information, see Registry
diff --git a/clients/client-ecr/src/commands/GetDownloadUrlForLayerCommand.ts b/clients/client-ecr/src/commands/GetDownloadUrlForLayerCommand.ts
index 8a2efdaccfe0a..36f5d0dd2d951 100644
--- a/clients/client-ecr/src/commands/GetDownloadUrlForLayerCommand.ts
+++ b/clients/client-ecr/src/commands/GetDownloadUrlForLayerCommand.ts
@@ -38,9 +38,9 @@ export interface GetDownloadUrlForLayerCommandOutput extends GetDownloadUrlForLa
* @public
* docker
CLI to pull, tag, and push images.docker
CLI to pull, tag, and push images.tagStatus
filter and specifying either TAGGED
,
* UNTAGGED
or ANY
. For example, you can filter your results
* to return only UNTAGGED
images and then pipe that result to a BatchDeleteImage operation to delete them. Or, you can filter your
@@ -55,7 +55,7 @@ export interface ListImagesCommandOutput extends ListImagesResponse, __MetadataB
* nextToken: "STRING_VALUE",
* maxResults: Number("int"),
* filter: { // ListImagesFilter
- * tagStatus: "STRING_VALUE",
+ * tagStatus: "TAGGED" || "UNTAGGED" || "ANY",
* },
* };
* const command = new ListImagesCommand(input);
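Following the pipe-to-BatchDeleteImage pattern described above, a minimal sketch (the repository name is a placeholder; a real cleanup would also page through nextToken):

import {
  ECRClient,
  ListImagesCommand,
  BatchDeleteImageCommand,
} from "@aws-sdk/client-ecr";

const client = new ECRClient({ region: "us-east-1" });

// Collect the IDs of untagged images, then delete them in one call.
const { imageIds } = await client.send(
  new ListImagesCommand({
    repositoryName: "my-repo", // placeholder
    filter: { tagStatus: "UNTAGGED" },
  })
);

if (imageIds?.length) {
  await client.send(
    new BatchDeleteImageCommand({ repositoryName: "my-repo", imageIds })
  );
}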
diff --git a/clients/client-ecr/src/commands/PutImageCommand.ts b/clients/client-ecr/src/commands/PutImageCommand.ts
index 932336431b885..bac4075f62c64 100644
--- a/clients/client-ecr/src/commands/PutImageCommand.ts
+++ b/clients/client-ecr/src/commands/PutImageCommand.ts
@@ -37,11 +37,10 @@ export interface PutImageCommandOutput extends PutImageResponse, __MetadataBeare
/**
* @public
* docker
CLI to pull, tag, and push images.PutImageScanningConfiguration
API is being deprecated, in favor
* of specifying the image scanning configuration at the registry level. For more
* information, see PutRegistryScanningConfiguration.
When configuring cross-account replication, the destination account must grant the * source account permission to replicate. This permission is controlled using a * registry permissions policy. For more information, see PutRegistryPolicy.
- *Uploads an image layer part to Amazon ECR.
- * When an image is pushed, each new image layer is uploaded in parts. The maximum size
+ * When an image is pushed, each new image layer is uploaded in parts. The maximum size
 * of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API
 * is called once per each new image layer part.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the + *
Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the * familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR * provides a secure, scalable, and reliable registry for your Docker or Open Container * Initiative (OCI) images. Amazon ECR supports private repositories with resource-based * permissions using IAM so that specific users or Amazon EC2 instances can access * repositories and images.
- *Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the + *
Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the * Amazon Web Services General Reference.
* * @packageDocumentation diff --git a/clients/client-ecr/src/models/models_0.ts b/clients/client-ecr/src/models/models_0.ts index 78a36dfe235ab..ba77867292775 100644 --- a/clients/client-ecr/src/models/models_0.ts +++ b/clients/client-ecr/src/models/models_0.ts @@ -325,7 +325,7 @@ export interface BatchGetImageRequest { /** * @public *The accepted media types for the request.
- * Valid values: application/vnd.docker.distribution.manifest.v1+json |
+ * Valid values: application/vnd.docker.distribution.manifest.v1+json |
 * application/vnd.docker.distribution.manifest.v2+json |
 * application/vnd.oci.image.manifest.v1+json
 *
The encryption configuration for the repository. This determines how the contents of * your repository are encrypted at rest.
- * By default, when no encryption configuration is set or the AES256
+ * By default, when no encryption configuration is set or the AES256
 * encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption
 * keys which encrypts your data at rest using an AES-256 encryption algorithm. This does
 * not require any action on your part.
- * For more control over the encryption of the contents of your repository, you can use
+ * For more control over the encryption of the contents of your repository, you can use
 * server-side encryption with a Key Management Service key stored in Key Management Service (KMS) to encrypt your
 * images. For more information, see Amazon ECR encryption at
 * rest in the Amazon Elastic Container Registry User Guide.
@@ -896,14 +896,14 @@ export interface EncryptionConfiguration { /** * @public *The encryption type to use.
- * If you use the KMS encryption type, the contents of the repository will
+ * If you use the KMS encryption type, the contents of the repository will
 * be encrypted using server-side encryption with a Key Management Service key stored in KMS. When you
 * use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key
 * for Amazon ECR, or specify your own KMS key, which you already created. For more
 * information, see Protecting data using server-side
 * encryption with a KMS key stored in Key Management Service (SSE-KMS) in the
 * Amazon Simple Storage Service Console Developer Guide.
- * If you use the AES256 encryption type, Amazon ECR uses server-side encryption
+ * If you use the AES256 encryption type, Amazon ECR uses server-side encryption
 * with Amazon S3-managed encryption keys which encrypts the images in the repository using an
 * AES-256 encryption algorithm. For more information, see Protecting data using
 * server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the
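A sketch of opting into KMS encryption at repository creation (repository name and key alias are placeholders; omitting encryptionConfiguration keeps the AES256 default described above):

import { ECRClient, CreateRepositoryCommand } from "@aws-sdk/client-ecr";

const client = new ECRClient({ region: "us-east-1" });

await client.send(
  new CreateRepositoryCommand({
    repositoryName: "my-encrypted-repo", // placeholder
    encryptionConfiguration: {
      encryptionType: "KMS",
      kmsKey: "alias/my-ecr-key", // placeholder; the AWS managed key is used if omitted
    },
  })
);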
@@ -1762,14 +1762,14 @@ export interface ImageDetail {
/**
* @public
*
The size, in bytes, of the image in the repository.
- * If the image is a manifest list, this will be the max size of all manifests in the
+ * If the image is a manifest list, this will be the max size of all manifests in the
 * list.
- *Beginning with Docker version 1.9, the Docker client compresses image layers
* before pushing them to a V2 Docker registry. The output of the docker
* images
command shows the uncompressed image size, so it may return a
* larger image size than the image sizes returned by DescribeImages.
The date and time, expressed in standard JavaScript date format, when Amazon ECR recorded * the last image pull.
- *Amazon ECR refreshes the last image pull timestamp at least once every 24 hours. For
* example, if you pull an image once a day then the lastRecordedPullTime
* timestamp will indicate the exact time that the image was last pulled. However, if
* you pull an image once an hour, because Amazon ECR refreshes the
* lastRecordedPullTime
timestamp at least once every 24 hours, the
* result may not be the exact time that the image was last pulled.
nextToken value. This value is
 * null when there are no more results to return. This option cannot be
 * used when you specify repositories with repositoryNames.
- * This token should be treated as an opaque identifier that is only used to
 * retrieve the next items in a list and not for other programmatic purposes.
- *
 * nextToken value. This value is
 * null when there are no more results to return.
- * This token should be treated as an opaque identifier that is only used to
 * retrieve the next items in a list and not for other programmatic purposes.
- *
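Because the token is opaque, the simplest consumer is the generated paginator, which threads nextToken for you; a sketch:

import { ECRClient, paginateDescribeRepositories } from "@aws-sdk/client-ecr";

const client = new ECRClient({ region: "us-east-1" });

// The paginator handles nextToken internally on each successive call.
for await (const page of paginateDescribeRepositories({ client }, {})) {
  for (const repo of page.repositories ?? []) {
    console.log(repo.repositoryName);
  }
}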
- * The scanning type to set for the registry.
- * When a registry scanning configuration is not defined, by default the
+ * When a registry scanning configuration is not defined, by default the
 * BASIC scan type is used. When basic scanning is used, you may specify
 * filters to determine which individual repositories, or all repositories, are scanned
 * when new images are pushed to those repositories. Alternatively, you can do manual scans
 * of images with basic scanning.
- * When the ENHANCED scan type is set, Amazon Inspector provides automated
+ * When the ENHANCED scan type is set, Amazon Inspector provides automated
 * vulnerability scanning. You may choose between continuous scanning or scan on push and
 * you may specify filters to determine which individual repositories, or all repositories,
 * are scanned.
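A sketch of switching a registry to enhanced scanning; the catch-all wildcard rule is an assumption about a typical configuration:

import {
  ECRClient,
  PutRegistryScanningConfigurationCommand,
} from "@aws-sdk/client-ecr";

const client = new ECRClient({ region: "us-east-1" });

// Continuous Amazon Inspector scanning for every repository in the registry.
await client.send(
  new PutRegistryScanningConfigurationCommand({
    scanType: "ENHANCED",
    rules: [
      {
        scanFrequency: "CONTINUOUS_SCAN",
        repositoryFilters: [{ filter: "*", filterType: "WILDCARD" }],
      },
    ],
  })
);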
XML – XML source file format.
 *
 * Here is an example of how you could specify the formatParams:
*
@@ -3073,7 +3072,6 @@ export interface UpdateChangesetRequest {
*
XML – XML source file format.
Here is an example of how you could specify the formatParams:
* Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per Amazon Web Services
* Region. This is an asynchronous operation that immediately returns. The initial status of the
@@ -80,7 +79,6 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO
* boundaries might be such that the size is a little over or under the configured buffering
* size. By default, no encryption is performed. We strongly recommend that you enable
* encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination:
diff --git a/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts
index 8846adb4daa2f..d0164119b9229 100644
--- a/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts
+++ b/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts
@@ -41,7 +41,6 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO
/**
* @public
*
*
COPY
syntax to load data into an Amazon Redshift table. This is
* specified in the RedshiftDestinationConfiguration.S3Configuration
* parameter.
The compression formats SNAPPY or ZIP cannot be
@@ -101,7 +98,6 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO
*
We strongly recommend that you use the user name and password you provide
 * exclusively with Kinesis Data Firehose, and that the permissions for the account are
 * restricted for Amazon Redshift INSERT permissions.
Kinesis Data Firehose assumes the IAM role that is configured as part of the diff --git a/clients/client-firehose/src/commands/PutRecordBatchCommand.ts b/clients/client-firehose/src/commands/PutRecordBatchCommand.ts index 341d4d3a968c5..dd5d10fd1796d 100644 --- a/clients/client-firehose/src/commands/PutRecordBatchCommand.ts +++ b/clients/client-firehose/src/commands/PutRecordBatchCommand.ts @@ -78,7 +78,6 @@ export interface PutRecordBatchCommandOutput extends PutRecordBatchOutput, __Met *
If PutRecordBatch throws ServiceUnavailableException,
 * back off and retry. If the exception persists, it is possible that the throughput limits
 * have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they * are added to a delivery stream as it attempts to send the records to the destination. If * the destination is unreachable for more than 24 hours, the data is no longer diff --git a/clients/client-firehose/src/commands/PutRecordCommand.ts b/clients/client-firehose/src/commands/PutRecordCommand.ts index d81d16eb4c81e..f1c9abfe7b3f7 100644 --- a/clients/client-firehose/src/commands/PutRecordCommand.ts +++ b/clients/client-firehose/src/commands/PutRecordCommand.ts @@ -63,7 +63,6 @@ export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBeare * are added to a delivery stream as it tries to send the records to the destination. If the * destination is unreachable for more than 24 hours, the data is no longer * available.
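A hedged retry sketch for the guidance above (stream name and backoff constants are illustrative; a production version would resend only the records reported as failed):

import {
  FirehoseClient,
  PutRecordBatchCommand,
  ServiceUnavailableException,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });
const records = [{ Data: new TextEncoder().encode("hello\n") }];

for (let attempt = 0; attempt < 5; attempt++) {
  try {
    const out = await client.send(
      new PutRecordBatchCommand({
        DeliveryStreamName: "my-stream", // placeholder
        Records: records,
      })
    );
    if (!out.FailedPutCount) break; // every record was accepted
  } catch (err) {
    // Back off and retry only on ServiceUnavailableException, as advised.
    if (!(err instanceof ServiceUnavailableException)) throw err;
  }
  await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 100));
}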
- * *Don't concatenate two or more base64 strings to form the data fields of your records. * Instead, concatenate the raw data, then perform base64 encoding.
diff --git a/clients/client-firehose/src/commands/UpdateDestinationCommand.ts b/clients/client-firehose/src/commands/UpdateDestinationCommand.ts index 619fe342d74a1..c1988f6ec1386 100644 --- a/clients/client-firehose/src/commands/UpdateDestinationCommand.ts +++ b/clients/client-firehose/src/commands/UpdateDestinationCommand.ts @@ -41,7 +41,6 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput, /** * @public *Updates the specified destination of the specified delivery stream.
- * *Use this operation to change the destination type (for example, to replace the Amazon * S3 destination with Amazon Redshift) or change the parameters associated with a destination * (for example, to change the bucket name of the Amazon S3 destination). The update might not @@ -58,7 +57,6 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput, *
If the destination type is not the same, for example, changing the destination from * Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this * case, all parameters must be specified.
- *
 * Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race
 * conditions and conflicting merges. This is a required field, and the service updates the
 * configuration only if the existing configuration has a version ID that matches. After the
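A sketch of the read-then-update flow that CurrentDeliveryStreamVersionId implies (stream name and the S3 buffering tweak are placeholders):

import {
  FirehoseClient,
  DescribeDeliveryStreamCommand,
  UpdateDestinationCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });

// Fetch the current version ID so the service can reject conflicting updates.
const { DeliveryStreamDescription: stream } = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: "my-stream" }) // placeholder
);

await client.send(
  new UpdateDestinationCommand({
    DeliveryStreamName: "my-stream",
    CurrentDeliveryStreamVersionId: stream!.VersionId!,
    DestinationId: stream!.Destinations![0].DestinationId!,
    ExtendedS3DestinationUpdate: {
      BufferingHints: { IntervalInSeconds: 120, SizeInMBs: 5 },
    },
  })
);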
diff --git a/clients/client-firehose/src/endpoint/ruleset.ts b/clients/client-firehose/src/endpoint/ruleset.ts
index 9d30402a87f4d..af082e4af5d35 100644
--- a/clients/client-firehose/src/endpoint/ruleset.ts
+++ b/clients/client-firehose/src/endpoint/ruleset.ts
@@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/firehose.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://firehose-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://firehose-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://firehose.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://firehose.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://firehose-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://firehose-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://firehose.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://firehose.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
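In client code, the parameters this ruleset consumes come from standard client configuration; a sketch of the two paths it distinguishes:

import { FirehoseClient } from "@aws-sdk/client-firehose";

// Resolves https://firehose-fips.us-east-1.amazonaws.com through the ruleset above.
const fipsClient = new FirehoseClient({ region: "us-east-1", useFipsEndpoint: true });

// A custom endpoint skips partition resolution entirely; per the ruleset it
// cannot be combined with the FIPS or dual-stack options.
const localClient = new FirehoseClient({ endpoint: "http://localhost:4566" });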
diff --git a/clients/client-firehose/src/models/models_0.ts b/clients/client-firehose/src/models/models_0.ts
index 8279b1424b4b5..87d61600ca33a 100644
--- a/clients/client-firehose/src/models/models_0.ts
+++ b/clients/client-firehose/src/models/models_0.ts
@@ -1453,7 +1453,6 @@ export interface ElasticsearchDestinationConfiguration {
 * for DescribeDomain, DescribeDomains, and
 * DescribeDomainConfig after assuming the role specified in RoleARN. For more information, see Amazon Resource Names (ARNs) and
 * Amazon Web Services Service Namespaces.
Specify either ClusterEndpoint or DomainARN.
The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per * index. If you try to specify a new type for an existing index that already has another * type, Kinesis Data Firehose returns an error during run time.
- *
 * For Elasticsearch 7.x, don't specify a TypeName.
You can specify up to 50 tags when creating a delivery stream.
*/ Tags?: Tag[]; @@ -3009,7 +3006,6 @@ export interface ElasticsearchDestinationDescription { * @public *The ARN of the Amazon ES domain. For more information, see Amazon * Resource Names (ARNs) and Amazon Web Services Service Namespaces.
- *
 * Kinesis Data Firehose uses either ClusterEndpoint or DomainARN
 * to send data to Amazon ES.
DescribeDomainConfig after assuming the IAM role specified in
 * RoleARN. For more information, see Amazon Resource Names (ARNs) and
 * Amazon Web Services Service Namespaces.
- *
 *
Specify either ClusterEndpoint or DomainARN.
- * *If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream,
* Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type
* name. If you want to update your delivery stream with a new index name, provide an empty
diff --git a/clients/client-fis/src/endpoint/EndpointParameters.ts b/clients/client-fis/src/endpoint/EndpointParameters.ts
index 6fb8e33df8987..1595f6619e0d7 100644
--- a/clients/client-fis/src/endpoint/EndpointParameters.ts
+++ b/clients/client-fis/src/endpoint/EndpointParameters.ts
@@ -27,7 +27,7 @@ export const resolveClientEndpointParameters =
+ *
* If a column is not explicitly set in a specific row, then the column level formula specified in the table * will be applied to the new row. If there is no column level formula but the last row of the table has a * formula, then that formula will be copied down to the new row. If there is no column level formula and diff --git a/clients/client-honeycode/src/commands/BatchUpdateTableRowsCommand.ts b/clients/client-honeycode/src/commands/BatchUpdateTableRowsCommand.ts index 5c864f716d217..30b285278bd20 100644 --- a/clients/client-honeycode/src/commands/BatchUpdateTableRowsCommand.ts +++ b/clients/client-honeycode/src/commands/BatchUpdateTableRowsCommand.ts @@ -43,7 +43,7 @@ export interface BatchUpdateTableRowsCommandOutput extends BatchUpdateTableRowsR *
* The BatchUpdateTableRows API allows you to update one or more rows in a table in a workbook. *
- *+ *
* You can specify the values to set in some or all of the columns in the table for the specified * rows. * If a column is not explicitly specified in a particular row, then that column will not be updated diff --git a/clients/client-honeycode/src/commands/BatchUpsertTableRowsCommand.ts b/clients/client-honeycode/src/commands/BatchUpsertTableRowsCommand.ts index 2806f9ae51c3b..9545dc91e6975 100644 --- a/clients/client-honeycode/src/commands/BatchUpsertTableRowsCommand.ts +++ b/clients/client-honeycode/src/commands/BatchUpsertTableRowsCommand.ts @@ -47,7 +47,7 @@ export interface BatchUpsertTableRowsCommandOutput extends BatchUpsertTableRowsR * in the request. If no matching rows are found, a new row is added at the end of the table and the cells in * that row are set to the new values specified in the request. *
- *+ *
* You can specify the values to set in some or all of the columns in the table for the * matching or newly appended rows. If a column is not explicitly specified for a particular row, then that * column will not be updated for that row. To clear out the data in a specific cell, you need to set the value @@ -89,7 +89,7 @@ export interface BatchUpsertTableRowsCommandOutput extends BatchUpsertTableRowsR * // rowIds: [ // RowIdList // required * // "STRING_VALUE", * // ], - * // upsertAction: "STRING_VALUE", // required + * // upsertAction: "UPDATED" || "APPENDED", // required * // }, * // }, * // workbookCursor: Number("long"), // required diff --git a/clients/client-honeycode/src/commands/DescribeTableDataImportJobCommand.ts b/clients/client-honeycode/src/commands/DescribeTableDataImportJobCommand.ts index 1bf100c6c5878..6b41bb7738e0c 100644 --- a/clients/client-honeycode/src/commands/DescribeTableDataImportJobCommand.ts +++ b/clients/client-honeycode/src/commands/DescribeTableDataImportJobCommand.ts @@ -57,7 +57,7 @@ export interface DescribeTableDataImportJobCommandOutput extends DescribeTableDa * const command = new DescribeTableDataImportJobCommand(input); * const response = await client.send(command); * // { // DescribeTableDataImportJobResult - * // jobStatus: "STRING_VALUE", // required + * // jobStatus: "SUBMITTED" || "IN_PROGRESS" || "COMPLETED" || "FAILED", // required * // message: "STRING_VALUE", // required * // jobMetadata: { // TableDataImportJobMetadata * // submitter: { // ImportJobSubmitter @@ -77,7 +77,7 @@ export interface DescribeTableDataImportJobCommandOutput extends DescribeTableDa * // delimiter: "STRING_VALUE", // required * // hasHeaderRow: true || false, * // ignoreEmptyRows: true || false, - * // dataCharacterEncoding: "STRING_VALUE", + * // dataCharacterEncoding: "UTF-8" || "US-ASCII" || "ISO-8859-1" || "UTF-16BE" || "UTF-16LE" || "UTF-16", * // }, * // }, * // dataSource: { // ImportDataSource @@ -86,7 +86,7 @@ export interface DescribeTableDataImportJobCommandOutput extends DescribeTableDa * // }, * // }, * // }, - * // errorCode: "STRING_VALUE", + * // errorCode: "ACCESS_DENIED" || "INVALID_URL_ERROR" || "INVALID_IMPORT_OPTIONS_ERROR" || "INVALID_TABLE_ID_ERROR" || "INVALID_TABLE_COLUMN_ID_ERROR" || "TABLE_NOT_FOUND_ERROR" || "FILE_EMPTY_ERROR" || "INVALID_FILE_TYPE_ERROR" || "FILE_PARSING_ERROR" || "FILE_SIZE_LIMIT_ERROR" || "FILE_NOT_FOUND_ERROR" || "UNKNOWN_ERROR" || "RESOURCE_NOT_FOUND_ERROR" || "SYSTEM_LIMIT_ERROR", * // }; * * ``` diff --git a/clients/client-honeycode/src/commands/GetScreenDataCommand.ts b/clients/client-honeycode/src/commands/GetScreenDataCommand.ts index 73534f6112c71..fac52472cf358 100644 --- a/clients/client-honeycode/src/commands/GetScreenDataCommand.ts +++ b/clients/client-honeycode/src/commands/GetScreenDataCommand.ts @@ -72,7 +72,7 @@ export interface GetScreenDataCommandOutput extends GetScreenDataResult, __Metad * // headers: [ // ResultHeader // required * // { // ColumnMetadata * // name: "STRING_VALUE", // required - * // format: "STRING_VALUE", // required + * // format: "AUTO" || "NUMBER" || "CURRENCY" || "DATE" || "TIME" || "DATE_TIME" || "PERCENTAGE" || "TEXT" || "ACCOUNTING" || "CONTACT" || "ROWLINK" || "ROWSET", // required * // }, * // ], * // rows: [ // ResultRows // required @@ -80,7 +80,7 @@ export interface GetScreenDataCommandOutput extends GetScreenDataResult, __Metad * // rowId: "STRING_VALUE", * // dataItems: [ // DataItems // required * // { // DataItem - * // overrideFormat: "STRING_VALUE", + * // 
overrideFormat: "AUTO" || "NUMBER" || "CURRENCY" || "DATE" || "TIME" || "DATE_TIME" || "PERCENTAGE" || "TEXT" || "ACCOUNTING" || "CONTACT" || "ROWLINK" || "ROWSET", * // rawValue: "STRING_VALUE", * // formattedValue: "STRING_VALUE", * // }, diff --git a/clients/client-honeycode/src/commands/ListTableColumnsCommand.ts b/clients/client-honeycode/src/commands/ListTableColumnsCommand.ts index e73e0bd145717..4ce7c655a0a56 100644 --- a/clients/client-honeycode/src/commands/ListTableColumnsCommand.ts +++ b/clients/client-honeycode/src/commands/ListTableColumnsCommand.ts @@ -57,7 +57,7 @@ export interface ListTableColumnsCommandOutput extends ListTableColumnsResult, _ * // { // TableColumn * // tableColumnId: "STRING_VALUE", * // tableColumnName: "STRING_VALUE", - * // format: "STRING_VALUE", + * // format: "AUTO" || "NUMBER" || "CURRENCY" || "DATE" || "TIME" || "DATE_TIME" || "PERCENTAGE" || "TEXT" || "ACCOUNTING" || "CONTACT" || "ROWLINK" || "ROWSET", * // }, * // ], * // nextToken: "STRING_VALUE", diff --git a/clients/client-honeycode/src/commands/ListTableRowsCommand.ts b/clients/client-honeycode/src/commands/ListTableRowsCommand.ts index b7f8a43061339..35d010dd75d37 100644 --- a/clients/client-honeycode/src/commands/ListTableRowsCommand.ts +++ b/clients/client-honeycode/src/commands/ListTableRowsCommand.ts @@ -66,7 +66,7 @@ export interface ListTableRowsCommandOutput extends ListTableRowsResult, __Metad * // cells: [ // Cells // required * // { // Cell * // formula: "STRING_VALUE", - * // format: "STRING_VALUE", + * // format: "AUTO" || "NUMBER" || "CURRENCY" || "DATE" || "TIME" || "DATE_TIME" || "PERCENTAGE" || "TEXT" || "ACCOUNTING" || "CONTACT" || "ROWLINK" || "ROWSET", * // rawValue: "STRING_VALUE", * // formattedValue: "STRING_VALUE", * // formattedValues: [ // FormattedValuesList diff --git a/clients/client-honeycode/src/commands/QueryTableRowsCommand.ts b/clients/client-honeycode/src/commands/QueryTableRowsCommand.ts index 0919b49537877..a79bf5bf1efda 100644 --- a/clients/client-honeycode/src/commands/QueryTableRowsCommand.ts +++ b/clients/client-honeycode/src/commands/QueryTableRowsCommand.ts @@ -72,7 +72,7 @@ export interface QueryTableRowsCommandOutput extends QueryTableRowsResult, __Met * // cells: [ // Cells // required * // { // Cell * // formula: "STRING_VALUE", - * // format: "STRING_VALUE", + * // format: "AUTO" || "NUMBER" || "CURRENCY" || "DATE" || "TIME" || "DATE_TIME" || "PERCENTAGE" || "TEXT" || "ACCOUNTING" || "CONTACT" || "ROWLINK" || "ROWSET", * // rawValue: "STRING_VALUE", * // formattedValue: "STRING_VALUE", * // formattedValues: [ // FormattedValuesList diff --git a/clients/client-honeycode/src/commands/StartTableDataImportJobCommand.ts b/clients/client-honeycode/src/commands/StartTableDataImportJobCommand.ts index 5229894f015ce..680241db3393a 100644 --- a/clients/client-honeycode/src/commands/StartTableDataImportJobCommand.ts +++ b/clients/client-honeycode/src/commands/StartTableDataImportJobCommand.ts @@ -58,7 +58,7 @@ export interface StartTableDataImportJobCommandOutput extends StartTableDataImpo * dataSourceUrl: "STRING_VALUE", * }, * }, - * dataFormat: "STRING_VALUE", // required + * dataFormat: "DELIMITED_TEXT", // required * destinationTableId: "STRING_VALUE", // required * importOptions: { // ImportOptions * destinationOptions: { // DestinationOptions @@ -72,7 +72,7 @@ export interface StartTableDataImportJobCommandOutput extends StartTableDataImpo * delimiter: "STRING_VALUE", // required * hasHeaderRow: true || false, * ignoreEmptyRows: true || false, - * 
dataCharacterEncoding: "STRING_VALUE", + * dataCharacterEncoding: "UTF-8" || "US-ASCII" || "ISO-8859-1" || "UTF-16BE" || "UTF-16LE" || "UTF-16", * }, * }, * clientRequestToken: "STRING_VALUE", // required @@ -81,7 +81,7 @@ export interface StartTableDataImportJobCommandOutput extends StartTableDataImpo * const response = await client.send(command); * // { // StartTableDataImportJobResult * // jobId: "STRING_VALUE", // required - * // jobStatus: "STRING_VALUE", // required + * // jobStatus: "SUBMITTED" || "IN_PROGRESS" || "COMPLETED" || "FAILED", // required * // }; * * ``` diff --git a/clients/client-honeycode/src/endpoint/ruleset.ts b/clients/client-honeycode/src/endpoint/ruleset.ts index 1aedf72dcc482..37a2a73d71c60 100644 --- a/clients/client-honeycode/src/endpoint/ruleset.ts +++ b/clients/client-honeycode/src/endpoint/ruleset.ts @@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/honeycode.json */ -const p="required", -q="fn", -r="argv", -s="ref"; -const a="PartitionResult", +const q="required", +r="fn", +s="argv", +t="ref"; +const a="isSet", b="tree", c="error", d="endpoint", -e={[p]:false,"type":"String"}, -f={[p]:true,"default":false,"type":"Boolean"}, -g={[s]:"Endpoint"}, -h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]}, -i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]}, -j={}, -k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]}, -l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]}, -m=[g], -n=[h], -o=[i]; -const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://honeycode-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://honeycode-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://honeycode.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://honeycode.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]}; +e="PartitionResult", +f={[q]:false,"type":"String"}, +g={[q]:true,"default":false,"type":"Boolean"}, +h={[t]:"Endpoint"}, +i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]}, +j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]}, +k={}, +l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]}, +m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]}, +n=[i], +o=[j], +p=[{[t]:"Region"}]; +const 
_data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://honeycode-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://honeycode-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://honeycode.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://honeycode.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-honeycode/src/models/models_0.ts b/clients/client-honeycode/src/models/models_0.ts index 705dfc9c2b892..3a35cde5ad8b1 100644 --- a/clients/client-honeycode/src/models/models_0.ts +++ b/clients/client-honeycode/src/models/models_0.ts @@ -71,12 +71,12 @@ export class AutomationExecutionTimeoutException extends __BaseException { *
* CellInput object contains the data needed to create or update cells in a table. *
- *
 * CellInput object has only a facts field or a fact field, but not both. A 400 bad request will be * thrown if both the fact and facts fields are present. *
- *The ID of the workbook where the new rows are being added.
- *+ *
* If a workbook with the specified ID could not be found, this API throws ResourceNotFoundException. *
*/ @@ -142,7 +142,7 @@ export interface BatchCreateTableRowsRequest { /** * @public *The ID of the table where the new rows are being added.
- *+ *
* If a table with the specified ID could not be found, this API throws ResourceNotFoundException. *
*/ @@ -155,7 +155,7 @@ export interface BatchCreateTableRowsRequest { * to uniquely identify the element in the request and the cells to create for that row. * You need to specify at least one item in this list. * - *+ *
* Note that if one of the column ids in any of the rows in the request does not exist in the table, then the * request fails and no updates are made to the table. *
@@ -171,7 +171,7 @@ export interface BatchCreateTableRowsRequest { * that if the first call using that request token is successfully performed, the second call will not perform * the operation again. * - *+ *
* Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests * spanning hours or days. *
@@ -384,7 +384,7 @@ export interface BatchDeleteTableRowsRequest { /** * @public *The ID of the workbook where the rows are being deleted.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -393,7 +393,7 @@ export interface BatchDeleteTableRowsRequest { /** * @public *The ID of the table where the rows are being deleted.
- *+ *
* If a table with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -404,7 +404,7 @@ export interface BatchDeleteTableRowsRequest { ** The list of row ids to delete from the table. You need to specify at least one row id in this list. *
- *+ *
* Note that if one of the row ids provided in the request does not exist in the table, then the request fails * and no rows are deleted from the table. *
@@ -420,7 +420,7 @@ export interface BatchDeleteTableRowsRequest { * that if the first call using that request token is successfully performed, the second call will not perform * the action again. * - *+ *
* Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests * spanning hours or days. *
@@ -481,7 +481,7 @@ export interface BatchUpdateTableRowsRequest { /** * @public *The ID of the workbook where the rows are being updated.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -490,7 +490,7 @@ export interface BatchUpdateTableRowsRequest { /** * @public *The ID of the table where the rows are being updated.
- *+ *
* If a table with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -504,7 +504,7 @@ export interface BatchUpdateTableRowsRequest { * You need to specify at least one row in this list, and for each row, you need to specify at least one * column to update. * - *+ *
* Note that if one of the row or column ids in the request does not exist in the table, then the request fails * and no updates are made to the table. *
@@ -520,7 +520,7 @@ export interface BatchUpdateTableRowsRequest { * that if the first call using that request token is successfully performed, the second call will not perform * the action again. * - *+ *
* Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests * spanning hours or days. *
@@ -605,7 +605,7 @@ export interface UpsertRowData { * rows. If the formula returns 0 rows, then a new row will be appended in the target table. If the formula * returns one or more rows, then the returned rows will be updated. * - *+ *
* Note that the filter formula needs to return rows from the target table for the upsert operation to succeed. * If the filter formula has a syntax error or it doesn't evaluate to zero or more rows in the target table * for any one item in the input list, then the entire BatchUpsertTableRows request fails and no updates are @@ -631,7 +631,7 @@ export interface BatchUpsertTableRowsRequest { /** * @public *
The ID of the workbook where the rows are being upserted.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -640,7 +640,7 @@ export interface BatchUpsertTableRowsRequest { /** * @public *The ID of the table where the rows are being upserted.
- *+ *
* If a table with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -654,7 +654,7 @@ export interface BatchUpsertTableRowsRequest { * and the cell values to set for each column in the upserted rows. You need to specify * at least one item in this list. * - *+ *
* Note that if one of the filter formulas in the request fails to evaluate because of an error or one of the * column ids in any of the rows does not exist in the table, then the request fails * and no updates are made to the table. @@ -671,7 +671,7 @@ export interface BatchUpsertTableRowsRequest { * that if the first call using that request token is successfully performed, the second call will not perform * the action again. *
- *+ *
* Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests * spanning hours or days. *
@@ -800,7 +800,7 @@ export interface Cell { * The raw value of the data contained in the cell. The raw value depends on the format of the data in the * cell. However the attribute in the API return value is always a string containing the raw value. * - *+ *
* Cells with format DATE, DATE_TIME or TIME have the raw value as a floating point * number where the whole number represents the number of days since 1/1/1900 and the fractional part * represents the fraction of the day since midnight. For example, a cell with date 11/3/2020 has the raw value @@ -808,7 +808,7 @@ export interface Cell { * 11/3/2020 9:00 AM has the raw value "44138.375". Notice that even though the raw value is a number in all * three cases, it is still represented as a string. *
- *
+ *
 * Cells with format NUMBER, CURRENCY, PERCENTAGE and ACCOUNTING have the raw value of the data as the number * representing the data being displayed. For example, the number 1.325 with two decimal places in the format * will have its raw value as "1.325" and formatted value as "1.33". A currency value for @@ -816,34 +816,34 @@ export interface Cell { * decimal places in the format will have its raw value as "0.2" and the formatted value as "20.00%". An * accounting value of -$25 will have "-25" as the raw value and "$ (25.00)" as the formatted value. *
- *+ *
* Cells with format TEXT will have the raw text as the raw value. For example, a cell with text "John Smith" * will have "John Smith" as both the raw value and the formatted value. *
- *+ *
* Cells with format CONTACT will have the name of the contact as a formatted value and the email address of * the contact as the raw value. For example, a contact for John Smith will have "John Smith" as the * formatted value and "john.smith@example.com" as the raw value. *
- *+ *
* Cells with format ROWLINK (aka picklist) will have the first column of the linked row as the formatted value * and the row id of the linked row as the raw value. For example, a cell containing a picklist to a table * that displays task status might have "Completed" as the formatted value and * "row:dfcefaee-5b37-4355-8f28-40c3e4ff5dd4/ca432b2f-b8eb-431d-9fb5-cbe0342f9f03" as the raw value. *
- *+ *
* Cells with format ROWSET (aka multi-select or multi-record picklist) will by default have the first column * of each of the linked rows as the formatted value in the list, and the rowset id of the linked rows as the * raw value. For example, a cell containing a multi-select picklist to a table that contains items might have * "Item A", "Item B" in the formatted value list and "rows:b742c1f4-6cb0-4650-a845-35eb86fcc2bb/ * [fdea123b-8f68-474a-aa8a-5ff87aa333af,6daf41f0-a138-4eee-89da-123086d36ecf]" as the raw value. *
- *+ *
* Cells with format ATTACHMENT will have the name of the attachment as the formatted value and the attachment * id as the raw value. For example, a cell containing an attachment named "image.jpeg" will have * "image.jpeg" as the formatted value and "attachment:ca432b2f-b8eb-431d-9fb5-cbe0342f9f03" as the raw value. *
- *+ *
* Cells with format AUTO or cells without any format that are auto-detected as one of the formats above will * contain the raw and formatted values as mentioned above, based on the auto-detected formats. If there is no * auto-detected format, the raw and formatted values will be the same as the data in the cell. @@ -856,7 +856,7 @@ export interface Cell { *
* The formatted value of the cell. This is the value that you see displayed in the cell in the UI. *
- *+ *
* Note that the formatted value of a cell is always represented as a string irrespective of the data that is * stored in the cell. For example, if a cell contains a date, the formatted value of the cell is the string * representation of the formatted date being shown in the cell in the UI. See details in the rawValue field @@ -979,7 +979,7 @@ export interface DescribeTableDataImportJobRequest { /** * @public *
The ID of the workbook into which data was imported.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -988,7 +988,7 @@ export interface DescribeTableDataImportJobRequest { /** * @public *The ID of the table into which data was imported.
- *+ *
* If a table with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -997,7 +997,7 @@ export interface DescribeTableDataImportJobRequest { /** * @public *The ID of the job that was returned by the StartTableDataImportJob request.
- *+ *
* If a job with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1252,7 +1252,7 @@ export interface GetScreenDataRequest { * The number of results to be returned on a single page. * Specify a number between 1 and 100. The maximum value is 100. * - *+ *
* This parameter is optional. If you don't specify this parameter, the default page size is 100. *
*/ @@ -1263,7 +1263,7 @@ export interface GetScreenDataRequest { ** This parameter is optional. If a nextToken is not specified, the API returns the first page of data. *
- *+ *
* Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API * will throw ValidationException. *
@@ -1304,7 +1304,7 @@ export interface ResultSet { * formats are not repeated in the rows. If a particular row does not have a value for a data cell, a blank * value is used. * - *+ *
* For example, a task list that displays the task name, due date and assigned person might have headers * [ \{ "name": "Task Name"\}, \{"name": "Due Date", "format": "DATE"\}, \{"name": "Assigned", "format": "CONTACT"\} ]. * Every row in the result will have the task name as the first item, due date as the second item and assigned @@ -1421,7 +1421,7 @@ export interface InvokeScreenAutomationRequest { * that if the first call using that request token is successfully performed, the second call will return the * response of the previous call rather than performing the action again. *
- *+ *
* Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests * spanning hours or days. *
@@ -1447,7 +1447,7 @@ export interface ListTableColumnsRequest { /** * @public *The ID of the workbook that contains the table whose columns are being retrieved.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1456,7 +1456,7 @@ export interface ListTableColumnsRequest { /** * @public *The ID of the table whose columns are being retrieved.
- *+ *
* If a table with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1467,7 +1467,7 @@ export interface ListTableColumnsRequest { ** This parameter is optional. If a nextToken is not specified, the API returns the first page of data. *
- *+ *
* Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API * will throw ValidationException. *
@@ -1541,7 +1541,7 @@ export interface ListTableRowsRequest { /** * @public *The ID of the workbook that contains the table whose rows are being retrieved.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1550,7 +1550,7 @@ export interface ListTableRowsRequest { /** * @public *The ID of the table whose rows are being retrieved.
- *+ *
* If a table with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1577,7 +1577,7 @@ export interface ListTableRowsRequest { ** This parameter is optional. If a nextToken is not specified, the API returns the first page of data. *
- *+ *
* Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API * will throw ValidationException. *
@@ -1660,7 +1660,7 @@ export interface ListTablesRequest { /** * @public *The ID of the workbook whose tables are being retrieved.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1677,7 +1677,7 @@ export interface ListTablesRequest { ** This parameter is optional. If a nextToken is not specified, the API returns the first page of data. *
- *+ *
* Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API * will throw ValidationException. *
@@ -1764,7 +1764,7 @@ export interface QueryTableRowsRequest { /** * @public *The ID of the workbook whose table rows are being queried.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1773,7 +1773,7 @@ export interface QueryTableRowsRequest { /** * @public *The ID of the table whose rows are being queried.
- *+ *
* If a table with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1797,7 +1797,7 @@ export interface QueryTableRowsRequest { ** This parameter is optional. If a nextToken is not specified, the API returns the first page of data. *
- *+ *
* Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API * will throw ValidationException. *
@@ -1852,7 +1852,7 @@ export interface StartTableDataImportJobRequest { /** * @public *The ID of the workbook where the rows are being imported.
- *+ *
* If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1878,7 +1878,7 @@ export interface StartTableDataImportJobRequest { /** * @public *The ID of the table where the rows are being imported.
- *+ *
* If a table with the specified id could not be found, this API throws ResourceNotFoundException. *
*/ @@ -1901,7 +1901,7 @@ export interface StartTableDataImportJobRequest { * that if the first call using that request token is successfully performed, the second call will not perform * the action again. * - *+ *
* Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests * spanning hours or days. *
diff --git a/clients/client-iam/src/endpoint/ruleset.ts b/clients/client-iam/src/endpoint/ruleset.ts index 870f8af4309c2..ebd9ad6767cee 100644 --- a/clients/client-iam/src/endpoint/ruleset.ts +++ b/clients/client-iam/src/endpoint/ruleset.ts @@ -39,5 +39,5 @@ w={[B]:"booleanEquals",[C]:[true,{[B]:"getAttr",[C]:[{[D]:e},"supportsDualStack" x=[l], y=[m], z=[{[D]:"Region"}]; -const _data={version:"1.0",parameters:{Region:i,UseDualStack:j,UseFIPS:j,Endpoint:i},rules:[{conditions:[{[B]:a,[C]:[k]}],type:b,rules:[{conditions:x,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:y,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:k,properties:n,headers:n},type:d}]}]},{type:b,rules:[{conditions:[{[B]:a,[C]:z}],type:b,rules:[{conditions:[{[B]:"aws.partition",[C]:z,assign:e}],type:b,rules:[{conditions:[o,q,r],endpoint:{url:"https://iam.amazonaws.com",properties:s,headers:n},type:d},{conditions:[o,l,r],endpoint:{url:"https://iam-fips.amazonaws.com",properties:s,headers:n},type:d},{conditions:[{[B]:f,[C]:[p,"aws-cn"]},q,r],endpoint:{url:"https://iam.cn-north-1.amazonaws.com.cn",properties:{[E]:[{name:g,[F]:h,[G]:"cn-north-1"}]},headers:n},type:d},{conditions:[t,q,r],endpoint:u,type:d},{conditions:[t,l,r],endpoint:u,type:d},{conditions:[{[B]:f,[C]:[p,"aws-iso"]},q,r],endpoint:{url:"https://iam.us-iso-east-1.c2s.ic.gov",properties:{[E]:[{name:g,[F]:h,[G]:"us-iso-east-1"}]},headers:n},type:d},{conditions:[{[B]:f,[C]:[p,"aws-iso-b"]},q,r],endpoint:{url:"https://iam.us-isob-east-1.sc2s.sgov.gov",properties:{[E]:[{name:g,[F]:h,[G]:"us-isob-east-1"}]},headers:n},type:d},{conditions:[l,m],type:b,rules:[{conditions:[v,w],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iam-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:n,headers:n},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:x,type:b,rules:[{conditions:[v],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iam-fips.{Region}.{PartitionResult#dnsSuffix}",properties:n,headers:n},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:y,type:b,rules:[{conditions:[w],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iam.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:n,headers:n},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://iam.{Region}.{PartitionResult#dnsSuffix}",properties:n,headers:n},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]}; +const _data={version:"1.0",parameters:{Region:i,UseDualStack:j,UseFIPS:j,Endpoint:i},rules:[{conditions:[{[B]:a,[C]:[k]}],type:b,rules:[{conditions:x,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:y,error:"Invalid Configuration: Dualstack and custom endpoint are not 
supported",type:c},{endpoint:{url:k,properties:n,headers:n},type:d}]},{conditions:[{[B]:a,[C]:z}],type:b,rules:[{conditions:[{[B]:"aws.partition",[C]:z,assign:e}],type:b,rules:[{conditions:[o,q,r],endpoint:{url:"https://iam.amazonaws.com",properties:s,headers:n},type:d},{conditions:[o,l,r],endpoint:{url:"https://iam-fips.amazonaws.com",properties:s,headers:n},type:d},{conditions:[{[B]:f,[C]:[p,"aws-cn"]},q,r],endpoint:{url:"https://iam.cn-north-1.amazonaws.com.cn",properties:{[E]:[{name:g,[F]:h,[G]:"cn-north-1"}]},headers:n},type:d},{conditions:[t,q,r],endpoint:u,type:d},{conditions:[t,l,r],endpoint:u,type:d},{conditions:[{[B]:f,[C]:[p,"aws-iso"]},q,r],endpoint:{url:"https://iam.us-iso-east-1.c2s.ic.gov",properties:{[E]:[{name:g,[F]:h,[G]:"us-iso-east-1"}]},headers:n},type:d},{conditions:[{[B]:f,[C]:[p,"aws-iso-b"]},q,r],endpoint:{url:"https://iam.us-isob-east-1.sc2s.sgov.gov",properties:{[E]:[{name:g,[F]:h,[G]:"us-isob-east-1"}]},headers:n},type:d},{conditions:[l,m],type:b,rules:[{conditions:[v,w],type:b,rules:[{endpoint:{url:"https://iam-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:n,headers:n},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:x,type:b,rules:[{conditions:[v],type:b,rules:[{endpoint:{url:"https://iam-fips.{Region}.{PartitionResult#dnsSuffix}",properties:n,headers:n},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:y,type:b,rules:[{conditions:[w],type:b,rules:[{endpoint:{url:"https://iam.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:n,headers:n},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://iam.{Region}.{PartitionResult#dnsSuffix}",properties:n,headers:n},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-imagebuilder/src/endpoint/ruleset.ts b/clients/client-imagebuilder/src/endpoint/ruleset.ts index 54536d104c5c4..70da301630ede 100644 --- a/clients/client-imagebuilder/src/endpoint/ruleset.ts +++ b/clients/client-imagebuilder/src/endpoint/ruleset.ts @@ -28,5 +28,5 @@ o={[t]:"booleanEquals",[u]:[true,{[t]:f,[u]:[n,"supportsDualStack"]}]}, p=[j], q=[k], r=[{[v]:"Region"}]; -const _data={version:"1.0",parameters:{Region:g,UseDualStack:h,UseFIPS:h,Endpoint:g},rules:[{conditions:[{[t]:a,[u]:[i]}],type:b,rules:[{conditions:p,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:q,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:i,properties:l,headers:l},type:d}]}]},{type:b,rules:[{conditions:[{[t]:a,[u]:r}],type:b,rules:[{conditions:[{[t]:"aws.partition",[u]:r,assign:e}],type:b,rules:[{conditions:[j,k],type:b,rules:[{conditions:[m,o],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://imagebuilder-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:p,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{conditions:[{[t]:"stringEquals",[u]:["aws-us-gov",{[t]:f,[u]:[n,"name"]}]}],endpoint:{url:"https://imagebuilder.{Region}.amazonaws.com",properties:l,headers:l},type:d},{endpoint:{url:"https://imagebuilder-fips.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"FIPS is enabled but 
this partition does not support FIPS",type:c}]},{conditions:q,type:b,rules:[{conditions:[o],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://imagebuilder.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://imagebuilder.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]}; +const _data={version:"1.0",parameters:{Region:g,UseDualStack:h,UseFIPS:h,Endpoint:g},rules:[{conditions:[{[t]:a,[u]:[i]}],type:b,rules:[{conditions:p,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:q,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:i,properties:l,headers:l},type:d}]},{conditions:[{[t]:a,[u]:r}],type:b,rules:[{conditions:[{[t]:"aws.partition",[u]:r,assign:e}],type:b,rules:[{conditions:[j,k],type:b,rules:[{conditions:[m,o],type:b,rules:[{endpoint:{url:"https://imagebuilder-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:p,type:b,rules:[{conditions:[m],type:b,rules:[{conditions:[{[t]:"stringEquals",[u]:["aws-us-gov",{[t]:f,[u]:[n,"name"]}]}],endpoint:{url:"https://imagebuilder.{Region}.amazonaws.com",properties:l,headers:l},type:d},{endpoint:{url:"https://imagebuilder-fips.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:q,type:b,rules:[{conditions:[o],type:b,rules:[{endpoint:{url:"https://imagebuilder.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://imagebuilder.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-inspector/src/commands/AddAttributesToFindingsCommand.ts b/clients/client-inspector/src/commands/AddAttributesToFindingsCommand.ts index a845dcff78788..c9acbbd4489fe 100644 --- a/clients/client-inspector/src/commands/AddAttributesToFindingsCommand.ts +++ b/clients/client-inspector/src/commands/AddAttributesToFindingsCommand.ts @@ -60,7 +60,7 @@ export interface AddAttributesToFindingsCommandOutput extends AddAttributesToFin * // { // AddAttributesToFindingsResponse * // failedItems: { // FailedItems // required * // "The name of the action. The action name can be one of the following values:
*+ *
* SNOOZE
- When you snooze the alarm, the alarm state changes to SNOOZE_DISABLED
.
+ *
* ENABLE
- When you enable the alarm, the alarm state changes to NORMAL
.
+ *
* DISABLE
- When you disable the alarm, the alarm state changes to DISABLED
.
+ *
* ACKNOWLEDGE
- When you acknowledge the alarm, the alarm state changes to ACKNOWLEDGED
.
+ *
* RESET
- When you reset the alarm, the alarm state changes to NORMAL
.
For more information, see the AlarmState API.
*/ @@ -322,38 +322,38 @@ export interface AlarmState { *The name of the alarm state. The state name can be one of the following values:
*+ *
* DISABLED
- When the alarm is in the DISABLED
state,
* it isn't ready to evaluate data. To enable the alarm,
* you must change the alarm to the NORMAL
state.
+ *
* NORMAL
- When the alarm is in the NORMAL
state,
* it's ready to evaluate data.
+ *
* ACTIVE
- If the alarm is in the ACTIVE
state,
* the alarm is invoked.
+ *
* ACKNOWLEDGED
- When the alarm is in the ACKNOWLEDGED
state,
* the alarm was invoked and you acknowledged the alarm.
+ *
* SNOOZE_DISABLED
- When the alarm is in the SNOOZE_DISABLED
state,
* the alarm is disabled for a specified period of time. After the snooze time,
* the alarm automatically changes to the NORMAL
state.
+ *
* LATCHED
- When the alarm is in the LATCHED
state,
* the alarm was invoked. However, the data that the alarm is currently evaluating is within the specified range.
* To change the alarm to the NORMAL
state, you must acknowledge the alarm.
The name of the alarm state. The state name can be one of the following values:
*+ *
* DISABLED
- When the alarm is in the DISABLED
state,
* it isn't ready to evaluate data. To enable the alarm,
* you must change the alarm to the NORMAL
state.
+ *
* NORMAL
- When the alarm is in the NORMAL
state,
* it's ready to evaluate data.
+ *
* ACTIVE
- If the alarm is in the ACTIVE
state,
* the alarm is invoked.
+ *
* ACKNOWLEDGED
- When the alarm is in the ACKNOWLEDGED
state,
* the alarm was invoked and you acknowledged the alarm.
+ *
* SNOOZE_DISABLED
- When the alarm is in the SNOOZE_DISABLED
state,
* the alarm is disabled for a specified period of time. After the snooze time,
* the alarm automatically changes to the NORMAL
state.
+ *
* LATCHED
- When the alarm is in the LATCHED
state,
* the alarm was invoked. However, the data that the alarm is currently evaluating is within the specified range.
* To change the alarm to the NORMAL
state, you must acknowledge the alarm.
Traditional analytics and business intelligence tools are designed to process structured data. IoT data often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result the data from these devices can have significant gaps, corrupted messages, and false readings that must be cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data from external sources.
-IoT Analytics automates the steps required to analyze data from IoT devices. IoT Analytics filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You can set up the service to collect only the data you need from your devices, apply mathematical transforms diff --git a/clients/client-iotanalytics/src/IoTAnalytics.ts b/clients/client-iotanalytics/src/IoTAnalytics.ts index 41ace1bfd6b1e..a210ab9692e5b 100644 --- a/clients/client-iotanalytics/src/IoTAnalytics.ts +++ b/clients/client-iotanalytics/src/IoTAnalytics.ts @@ -697,13 +697,11 @@ export interface IoTAnalytics { * You can then query the data and run sophisticated analytics on it. IoT Analytics enables advanced * data exploration through integration with Jupyter Notebooks and data visualization through integration * with Amazon QuickSight.
- * *Traditional analytics and business intelligence tools are designed to process structured data. IoT data * often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result * the data from these devices can have significant gaps, corrupted messages, and false readings that must be * cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data * from external sources.
- * *IoT Analytics automates the steps required to analyze data from IoT devices. IoT Analytics * filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You * can set up the service to collect only the data you need from your devices, apply mathematical transforms diff --git a/clients/client-iotanalytics/src/IoTAnalyticsClient.ts b/clients/client-iotanalytics/src/IoTAnalyticsClient.ts index b50f01ed6adce..fad1e0739160f 100644 --- a/clients/client-iotanalytics/src/IoTAnalyticsClient.ts +++ b/clients/client-iotanalytics/src/IoTAnalyticsClient.ts @@ -371,13 +371,11 @@ export interface IoTAnalyticsClientResolvedConfig extends IoTAnalyticsClientReso * You can then query the data and run sophisticated analytics on it. IoT Analytics enables advanced * data exploration through integration with Jupyter Notebooks and data visualization through integration * with Amazon QuickSight.
- * *Traditional analytics and business intelligence tools are designed to process structured data. IoT data * often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result * the data from these devices can have significant gaps, corrupted messages, and false readings that must be * cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data * from external sources.
- * *IoT Analytics automates the steps required to analyze data from IoT devices. IoT Analytics * filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You * can set up the service to collect only the data you need from your devices, apply mathematical transforms diff --git a/clients/client-iotanalytics/src/commands/CreateDatasetCommand.ts b/clients/client-iotanalytics/src/commands/CreateDatasetCommand.ts index 4062721ef5f87..199b9c8483710 100644 --- a/clients/client-iotanalytics/src/commands/CreateDatasetCommand.ts +++ b/clients/client-iotanalytics/src/commands/CreateDatasetCommand.ts @@ -67,7 +67,7 @@ export interface CreateDatasetCommandOutput extends CreateDatasetResponse, __Met * image: "STRING_VALUE", // required * executionRoleArn: "STRING_VALUE", // required * resourceConfiguration: { // ResourceConfiguration - * computeType: "STRING_VALUE", // required + * computeType: "ACU_1" || "ACU_2", // required * volumeSizeInGB: Number("int"), // required * }, * variables: [ // Variables diff --git a/clients/client-iotanalytics/src/commands/DescribeChannelCommand.ts b/clients/client-iotanalytics/src/commands/DescribeChannelCommand.ts index 7d8651215d32c..6e15bb9108079 100644 --- a/clients/client-iotanalytics/src/commands/DescribeChannelCommand.ts +++ b/clients/client-iotanalytics/src/commands/DescribeChannelCommand.ts @@ -61,7 +61,7 @@ export interface DescribeChannelCommandOutput extends DescribeChannelResponse, _ * // }, * // }, * // arn: "STRING_VALUE", - * // status: "STRING_VALUE", + * // status: "CREATING" || "ACTIVE" || "DELETING", * // retentionPeriod: { // RetentionPeriod * // unlimited: true || false, * // numberOfDays: Number("int"), diff --git a/clients/client-iotanalytics/src/commands/DescribeDatasetCommand.ts b/clients/client-iotanalytics/src/commands/DescribeDatasetCommand.ts index 3c1c1275f6df2..ff300f7aeac39 100644 --- a/clients/client-iotanalytics/src/commands/DescribeDatasetCommand.ts +++ b/clients/client-iotanalytics/src/commands/DescribeDatasetCommand.ts @@ -70,7 +70,7 @@ export interface DescribeDatasetCommandOutput extends DescribeDatasetResponse, _ * // image: "STRING_VALUE", // required * // executionRoleArn: "STRING_VALUE", // required * // resourceConfiguration: { // ResourceConfiguration - * // computeType: "STRING_VALUE", // required + * // computeType: "ACU_1" || "ACU_2", // required * // volumeSizeInGB: Number("int"), // required * // }, * // variables: [ // Variables @@ -119,7 +119,7 @@ export interface DescribeDatasetCommandOutput extends DescribeDatasetResponse, _ * // }, * // }, * // ], - * // status: "STRING_VALUE", + * // status: "CREATING" || "ACTIVE" || "DELETING", * // creationTime: new Date("TIMESTAMP"), * // lastUpdateTime: new Date("TIMESTAMP"), * // retentionPeriod: { // RetentionPeriod diff --git a/clients/client-iotanalytics/src/commands/DescribeDatastoreCommand.ts b/clients/client-iotanalytics/src/commands/DescribeDatastoreCommand.ts index 336db002e1545..d02134a37a5be 100644 --- a/clients/client-iotanalytics/src/commands/DescribeDatastoreCommand.ts +++ b/clients/client-iotanalytics/src/commands/DescribeDatastoreCommand.ts @@ -67,7 +67,7 @@ export interface DescribeDatastoreCommandOutput extends DescribeDatastoreRespons * // }, * // }, * // arn: "STRING_VALUE", - * // status: "STRING_VALUE", + * // status: "CREATING" || "ACTIVE" || "DELETING", * // retentionPeriod: { // RetentionPeriod * // unlimited: true || false, * // numberOfDays: Number("int"), diff --git 
a/clients/client-iotanalytics/src/commands/DescribeLoggingOptionsCommand.ts b/clients/client-iotanalytics/src/commands/DescribeLoggingOptionsCommand.ts index 95d5d5643d2a1..03c98080be2cf 100644 --- a/clients/client-iotanalytics/src/commands/DescribeLoggingOptionsCommand.ts +++ b/clients/client-iotanalytics/src/commands/DescribeLoggingOptionsCommand.ts @@ -49,7 +49,7 @@ export interface DescribeLoggingOptionsCommandOutput extends DescribeLoggingOpti * // { // DescribeLoggingOptionsResponse * // loggingOptions: { // LoggingOptions * // roleArn: "STRING_VALUE", // required - * // level: "STRING_VALUE", // required + * // level: "ERROR", // required * // enabled: true || false, // required * // }, * // }; diff --git a/clients/client-iotanalytics/src/commands/DescribePipelineCommand.ts b/clients/client-iotanalytics/src/commands/DescribePipelineCommand.ts index 7367a766979dc..210baab81c16a 100644 --- a/clients/client-iotanalytics/src/commands/DescribePipelineCommand.ts +++ b/clients/client-iotanalytics/src/commands/DescribePipelineCommand.ts @@ -120,7 +120,7 @@ export interface DescribePipelineCommandOutput extends DescribePipelineResponse, * // reprocessingSummaries: [ // ReprocessingSummaries * // { // ReprocessingSummary * // id: "STRING_VALUE", - * // status: "STRING_VALUE", + * // status: "RUNNING" || "SUCCEEDED" || "CANCELLED" || "FAILED", * // creationTime: new Date("TIMESTAMP"), * // }, * // ], diff --git a/clients/client-iotanalytics/src/commands/GetDatasetContentCommand.ts b/clients/client-iotanalytics/src/commands/GetDatasetContentCommand.ts index 1e19b2eb497e1..9321e9e16ca95 100644 --- a/clients/client-iotanalytics/src/commands/GetDatasetContentCommand.ts +++ b/clients/client-iotanalytics/src/commands/GetDatasetContentCommand.ts @@ -58,7 +58,7 @@ export interface GetDatasetContentCommandOutput extends GetDatasetContentRespons * // ], * // timestamp: new Date("TIMESTAMP"), * // status: { // DatasetContentStatus - * // state: "STRING_VALUE", + * // state: "CREATING" || "SUCCEEDED" || "FAILED", * // reason: "STRING_VALUE", * // }, * // }; diff --git a/clients/client-iotanalytics/src/commands/ListChannelsCommand.ts b/clients/client-iotanalytics/src/commands/ListChannelsCommand.ts index 7ca6d9329e9f7..cb22b2d375609 100644 --- a/clients/client-iotanalytics/src/commands/ListChannelsCommand.ts +++ b/clients/client-iotanalytics/src/commands/ListChannelsCommand.ts @@ -61,7 +61,7 @@ export interface ListChannelsCommandOutput extends ListChannelsResponse, __Metad * // roleArn: "STRING_VALUE", * // }, * // }, - * // status: "STRING_VALUE", + * // status: "CREATING" || "ACTIVE" || "DELETING", * // creationTime: new Date("TIMESTAMP"), * // lastUpdateTime: new Date("TIMESTAMP"), * // lastMessageArrivalTime: new Date("TIMESTAMP"), diff --git a/clients/client-iotanalytics/src/commands/ListDatasetContentsCommand.ts b/clients/client-iotanalytics/src/commands/ListDatasetContentsCommand.ts index bad2344001099..13e4bbcc61c79 100644 --- a/clients/client-iotanalytics/src/commands/ListDatasetContentsCommand.ts +++ b/clients/client-iotanalytics/src/commands/ListDatasetContentsCommand.ts @@ -57,7 +57,7 @@ export interface ListDatasetContentsCommandOutput extends ListDatasetContentsRes * // { // DatasetContentSummary * // version: "STRING_VALUE", * // status: { // DatasetContentStatus - * // state: "STRING_VALUE", + * // state: "CREATING" || "SUCCEEDED" || "FAILED", * // reason: "STRING_VALUE", * // }, * // creationTime: new Date("TIMESTAMP"), diff --git 
a/clients/client-iotanalytics/src/commands/ListDatasetsCommand.ts b/clients/client-iotanalytics/src/commands/ListDatasetsCommand.ts index ae0206568fb8c..8694559f260a5 100644 --- a/clients/client-iotanalytics/src/commands/ListDatasetsCommand.ts +++ b/clients/client-iotanalytics/src/commands/ListDatasetsCommand.ts @@ -53,7 +53,7 @@ export interface ListDatasetsCommandOutput extends ListDatasetsResponse, __Metad * // datasetSummaries: [ // DatasetSummaries * // { // DatasetSummary * // datasetName: "STRING_VALUE", - * // status: "STRING_VALUE", + * // status: "CREATING" || "ACTIVE" || "DELETING", * // creationTime: new Date("TIMESTAMP"), * // lastUpdateTime: new Date("TIMESTAMP"), * // triggers: [ // DatasetTriggers @@ -69,7 +69,7 @@ export interface ListDatasetsCommandOutput extends ListDatasetsResponse, __Metad * // actions: [ // DatasetActionSummaries * // { // DatasetActionSummary * // actionName: "STRING_VALUE", - * // actionType: "STRING_VALUE", + * // actionType: "QUERY" || "CONTAINER", * // }, * // ], * // }, diff --git a/clients/client-iotanalytics/src/commands/ListDatastoresCommand.ts b/clients/client-iotanalytics/src/commands/ListDatastoresCommand.ts index f3deb0c3773d3..78c922c66e010 100644 --- a/clients/client-iotanalytics/src/commands/ListDatastoresCommand.ts +++ b/clients/client-iotanalytics/src/commands/ListDatastoresCommand.ts @@ -67,11 +67,11 @@ export interface ListDatastoresCommandOutput extends ListDatastoresResponse, __M * // }, * // }, * // }, - * // status: "STRING_VALUE", + * // status: "CREATING" || "ACTIVE" || "DELETING", * // creationTime: new Date("TIMESTAMP"), * // lastUpdateTime: new Date("TIMESTAMP"), * // lastMessageArrivalTime: new Date("TIMESTAMP"), - * // fileFormatType: "STRING_VALUE", + * // fileFormatType: "JSON" || "PARQUET", * // datastorePartitions: { // DatastorePartitions * // partitions: [ // Partitions * // { // DatastorePartition diff --git a/clients/client-iotanalytics/src/commands/ListPipelinesCommand.ts b/clients/client-iotanalytics/src/commands/ListPipelinesCommand.ts index cca467869e6f2..c62ddfca9a040 100644 --- a/clients/client-iotanalytics/src/commands/ListPipelinesCommand.ts +++ b/clients/client-iotanalytics/src/commands/ListPipelinesCommand.ts @@ -56,7 +56,7 @@ export interface ListPipelinesCommandOutput extends ListPipelinesResponse, __Met * // reprocessingSummaries: [ // ReprocessingSummaries * // { // ReprocessingSummary * // id: "STRING_VALUE", - * // status: "STRING_VALUE", + * // status: "RUNNING" || "SUCCEEDED" || "CANCELLED" || "FAILED", * // creationTime: new Date("TIMESTAMP"), * // }, * // ], diff --git a/clients/client-iotanalytics/src/commands/PutLoggingOptionsCommand.ts b/clients/client-iotanalytics/src/commands/PutLoggingOptionsCommand.ts index e7e78edd75a6f..5b14e040889a8 100644 --- a/clients/client-iotanalytics/src/commands/PutLoggingOptionsCommand.ts +++ b/clients/client-iotanalytics/src/commands/PutLoggingOptionsCommand.ts @@ -50,7 +50,7 @@ export interface PutLoggingOptionsCommandOutput extends __MetadataBearer {} * const input = { // PutLoggingOptionsRequest * loggingOptions: { // LoggingOptions * roleArn: "STRING_VALUE", // required - * level: "STRING_VALUE", // required + * level: "ERROR", // required * enabled: true || false, // required * }, * }; diff --git a/clients/client-iotanalytics/src/commands/UpdateDatasetCommand.ts b/clients/client-iotanalytics/src/commands/UpdateDatasetCommand.ts index 1af4c5a677a35..b023961c9edc6 100644 --- a/clients/client-iotanalytics/src/commands/UpdateDatasetCommand.ts +++ 
b/clients/client-iotanalytics/src/commands/UpdateDatasetCommand.ts @@ -63,7 +63,7 @@ export interface UpdateDatasetCommandOutput extends __MetadataBearer {} * image: "STRING_VALUE", // required * executionRoleArn: "STRING_VALUE", // required * resourceConfiguration: { // ResourceConfiguration - * computeType: "STRING_VALUE", // required + * computeType: "ACU_1" || "ACU_2", // required * volumeSizeInGB: Number("int"), // required * }, * variables: [ // Variables diff --git a/clients/client-iotanalytics/src/endpoint/ruleset.ts b/clients/client-iotanalytics/src/endpoint/ruleset.ts index f3cdc4fce7881..14d7e281970fe 100644 --- a/clients/client-iotanalytics/src/endpoint/ruleset.ts +++ b/clients/client-iotanalytics/src/endpoint/ruleset.ts @@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/iotanalytics.json */ -const p="required", -q="fn", -r="argv", -s="ref"; -const a="PartitionResult", +const q="required", +r="fn", +s="argv", +t="ref"; +const a="isSet", b="tree", c="error", d="endpoint", -e={[p]:false,"type":"String"}, -f={[p]:true,"default":false,"type":"Boolean"}, -g={[s]:"Endpoint"}, -h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]}, -i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]}, -j={}, -k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]}, -l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]}, -m=[g], -n=[h], -o=[i]; -const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://iotanalytics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iotanalytics-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://iotanalytics.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://iotanalytics.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]}; +e="PartitionResult", +f={[q]:false,"type":"String"}, +g={[q]:true,"default":false,"type":"Boolean"}, +h={[t]:"Endpoint"}, +i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]}, +j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]}, +k={}, +l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]}, +m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]}, +n=[i], +o=[j], +p=[{[t]:"Region"}]; +const 
_data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://iotanalytics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://iotanalytics-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://iotanalytics.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://iotanalytics.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-iotanalytics/src/index.ts b/clients/client-iotanalytics/src/index.ts index 7113efd6ed4f1..d35cbceebead3 100644 --- a/clients/client-iotanalytics/src/index.ts +++ b/clients/client-iotanalytics/src/index.ts @@ -5,13 +5,11 @@ * You can then query the data and run sophisticated analytics on it. IoT Analytics enables advanced * data exploration through integration with Jupyter Notebooks and data visualization through integration * with Amazon QuickSight.
- * *Traditional analytics and business intelligence tools are designed to process structured data. IoT data * often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result * the data from these devices can have significant gaps, corrupted messages, and false readings that must be * cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data * from external sources.
- * *IoT Analytics automates the steps required to analyze data from IoT devices. IoT Analytics * filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You * can set up the service to collect only the data you need from your devices, apply mathematical transforms diff --git a/clients/client-iotanalytics/src/models/models_0.ts b/clients/client-iotanalytics/src/models/models_0.ts index f9358a4f03b8f..85ad25559a716 100644 --- a/clients/client-iotanalytics/src/models/models_0.ts +++ b/clients/client-iotanalytics/src/models/models_0.ts @@ -68,7 +68,7 @@ export interface BatchPutMessageRequest { *
The list of messages to be sent. Each message has the format: \{ "messageId": "string", * "payload": "string"\}.
*The field names of message payloads (data) that you send to IoT Analytics:
- *Must contain only alphanumeric characters and undescores (_). No other special characters are * allowed.
diff --git a/clients/client-iotdeviceadvisor/src/endpoint/ruleset.ts b/clients/client-iotdeviceadvisor/src/endpoint/ruleset.ts index d5a9ae765d40f..dab014befec53 100644 --- a/clients/client-iotdeviceadvisor/src/endpoint/ruleset.ts +++ b/clients/client-iotdeviceadvisor/src/endpoint/ruleset.ts @@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack" n=[i], o=[j], p=[{[t]:"Region"}]; -const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://api.iotdeviceadvisor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://api.iotdeviceadvisor-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://api.iotdeviceadvisor.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://api.iotdeviceadvisor.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]}; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://api.iotdeviceadvisor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://api.iotdeviceadvisor-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://api.iotdeviceadvisor.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://api.iotdeviceadvisor.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; 
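All of the ruleset rewrites in this patch encode the same precedence: a custom Endpoint wins (and rejects FIPS or dualstack), FIPS and/or dualstack variants apply when the partition supports them, the plain regional endpoint is the fallback, and a missing Region now fails with an explicit error instead of a partition lookup. Callers reach those branches through standard client configuration flags. A minimal sketch (shown with the Honeycode client from this patch; the flags are generic SDK options, not something added by this commit):

```
import { HoneycodeClient } from "@aws-sdk/client-honeycode";

// FIPS + dualstack: resolves the "honeycode-fips.{Region}.{dualStackDnsSuffix}"
// template above when the partition supports both; otherwise the ruleset raises
// "FIPS and DualStack are enabled, but this partition does not support one or both".
const fipsDualstack = new HoneycodeClient({
  region: "us-east-1",
  useFipsEndpoint: true,
  useDualstackEndpoint: true,
});

// A custom endpoint takes the first branch; combining it with FIPS or
// dualstack is the "Invalid Configuration" error path seen in the rules.
const custom = new HoneycodeClient({
  region: "us-east-1",
  endpoint: "https://example.internal.proxy", // placeholder
});
```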
diff --git a/clients/client-iotfleethub/src/commands/DescribeApplicationCommand.ts b/clients/client-iotfleethub/src/commands/DescribeApplicationCommand.ts index 145333a7c5bd9..9d34a105bd04e 100644 --- a/clients/client-iotfleethub/src/commands/DescribeApplicationCommand.ts +++ b/clients/client-iotfleethub/src/commands/DescribeApplicationCommand.ts @@ -57,7 +57,7 @@ export interface DescribeApplicationCommandOutput extends DescribeApplicationRes * // applicationName: "STRING_VALUE", // required * // applicationDescription: "STRING_VALUE", * // applicationUrl: "STRING_VALUE", // required - * // applicationState: "STRING_VALUE", // required + * // applicationState: "CREATING" || "DELETING" || "ACTIVE" || "CREATE_FAILED" || "DELETE_FAILED", // required * // applicationCreationDate: Number("long"), // required * // applicationLastUpdateDate: Number("long"), // required * // roleArn: "STRING_VALUE", // required diff --git a/clients/client-iotfleethub/src/commands/ListApplicationsCommand.ts b/clients/client-iotfleethub/src/commands/ListApplicationsCommand.ts index 5002e3a3aa622..e1d68e152bbc7 100644 --- a/clients/client-iotfleethub/src/commands/ListApplicationsCommand.ts +++ b/clients/client-iotfleethub/src/commands/ListApplicationsCommand.ts @@ -60,7 +60,7 @@ export interface ListApplicationsCommandOutput extends ListApplicationsResponse, * // applicationUrl: "STRING_VALUE", // required * // applicationCreationDate: Number("long"), * // applicationLastUpdateDate: Number("long"), - * // applicationState: "STRING_VALUE", + * // applicationState: "CREATING" || "DELETING" || "ACTIVE" || "CREATE_FAILED" || "DELETE_FAILED", * // }, * // ], * // nextToken: "STRING_VALUE", diff --git a/clients/client-iotfleethub/src/endpoint/ruleset.ts b/clients/client-iotfleethub/src/endpoint/ruleset.ts index 36375776dbdbd..f8c53cb81750c 100644 --- a/clients/client-iotfleethub/src/endpoint/ruleset.ts +++ b/clients/client-iotfleethub/src/endpoint/ruleset.ts @@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/iotfleethub.json */ -const p="required", -q="fn", -r="argv", -s="ref"; -const a="PartitionResult", +const q="required", +r="fn", +s="argv", +t="ref"; +const a="isSet", b="tree", c="error", d="endpoint", -e={[p]:false,"type":"String"}, -f={[p]:true,"default":false,"type":"Boolean"}, -g={[s]:"Endpoint"}, -h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]}, -i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]}, -j={}, -k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]}, -l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]}, -m=[g], -n=[h], -o=[i]; -const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://api.fleethub.iot-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or 
both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://api.fleethub.iot-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://api.fleethub.iot.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://api.fleethub.iot.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]}; +e="PartitionResult", +f={[q]:false,"type":"String"}, +g={[q]:true,"default":false,"type":"Boolean"}, +h={[t]:"Endpoint"}, +i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]}, +j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]}, +k={}, +l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]}, +m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]}, +n=[i], +o=[j], +p=[{[t]:"Region"}]; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://api.fleethub.iot-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://api.fleethub.iot-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://api.fleethub.iot.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://api.fleethub.iot.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-iotfleetwise/src/endpoint/ruleset.ts b/clients/client-iotfleetwise/src/endpoint/ruleset.ts index 12c9e9fad0db8..1f78038c1c97a 100644 --- a/clients/client-iotfleetwise/src/endpoint/ruleset.ts +++ b/clients/client-iotfleetwise/src/endpoint/ruleset.ts @@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack" n=[i], o=[j], p=[{[t]:"Region"}]; -const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not 
supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iotfleetwise-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iotfleetwise-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iotfleetwise.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://iotfleetwise.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]}; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://iotfleetwise-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://iotfleetwise-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://iotfleetwise.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://iotfleetwise.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-iotsecuretunneling/src/IoTSecureTunneling.ts b/clients/client-iotsecuretunneling/src/IoTSecureTunneling.ts index ab96941d97aec..32875349acbc3 100644 --- a/clients/client-iotsecuretunneling/src/IoTSecureTunneling.ts +++ b/clients/client-iotsecuretunneling/src/IoTSecureTunneling.ts @@ -147,9 +147,9 @@ export interface IoTSecureTunneling { /** * @public *IoT Secure Tunneling creates remote connections to devices deployed in the + *
IoT Secure Tunneling creates remote connections to devices deployed in the * field.
- *For more information about how IoT Secure Tunneling works, see IoT
+ * For more information about how IoT Secure Tunneling works, see IoT
* Secure Tunneling. IoT Secure Tunneling creates remote connections to devices deployed in the
+ * IoT Secure Tunneling creates remote connections to devices deployed in the
* field. For more information about how IoT Secure Tunneling works, see IoT
+ * For more information about how IoT Secure Tunneling works, see IoT
* Secure Tunneling. Closes a tunnel identified by the unique tunnel id. When a Requires permission to access the CloseTunnel action. Requires permission to access the CloseTunnel action. Gets information about a tunnel identified by the unique tunnel id. Requires permission to access the DescribeTunnel action. Requires permission to access the DescribeTunnel action. List all tunnels for an Amazon Web Services account. Tunnels are listed by creation time in
* descending order; newer tunnels will be listed before older tunnels. Requires permission to access the ListTunnels action. Requires permission to access the ListTunnels action. Creates a new tunnel and returns two client access tokens for clients to use to
* connect to the IoT Secure Tunneling proxy server. Requires permission to access the OpenTunnel action. Requires permission to access the OpenTunnel action. Revokes the current client access token (CAT) and returns a new CAT for clients to
* use when reconnecting to secure tunneling to access the same tunnel. Requires permission to access the RotateTunnelAccessToken action. Rotating the CAT doesn't extend the tunnel duration. For example, say the tunnel
+ * Requires permission to access the RotateTunnelAccessToken action. Rotating the CAT doesn't extend the tunnel duration. For example, say the tunnel
* duration is 12 hours and the tunnel has already been open for 4 hours. When you
* rotate the access tokens, the new tokens that are generated can only be used for the
* remaining 8 hours. IoT Secure Tunneling creates remote connections to devices deployed in the
+ * IoT Secure Tunneling creates remote connections to devices deployed in the
* field. For more information about how IoT Secure Tunneling works, see IoT
+ * For more information about how IoT Secure Tunneling works, see IoT
* Secure Tunneling. For more information about how AWS IoT Things Graph works, see the User Guide. The AWS IoT Things Graph service is discontinued. For more information about how AWS IoT Things Graph works, see the User Guide. The AWS IoT Things Graph service is discontinued.CloseTunnel
* request is received, we close the WebSocket connections between the client and proxy
* server so no data can be transmitted.
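The CAT rotation semantics above lend themselves to a short sketch; the tunnel id is a placeholder and the shapes are a best-effort reading of @aws-sdk/client-iotsecuretunneling:

import {
  IoTSecureTunnelingClient,
  RotateTunnelAccessTokenCommand,
} from "@aws-sdk/client-iotsecuretunneling";

const client = new IoTSecureTunnelingClient({ region: "us-east-1" });

const { sourceAccessToken } = await client.send(
  new RotateTunnelAccessTokenCommand({
    tunnelId: "tunnel-id", // placeholder
    clientMode: "SOURCE",  // rotate only the source client's token
  })
);
// If a 12-hour tunnel has already been open for 4 hours, this new token
// is only valid for the remaining 8 hours; rotation never extends the
// tunnel duration.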
For more information about how AWS IoT Things Graph works, see the User Guide.
- * *The AWS IoT Things Graph service is discontinued.
*/ export class IoTThingsGraphClient extends __Client< diff --git a/clients/client-iotthingsgraph/src/commands/CreateFlowTemplateCommand.ts b/clients/client-iotthingsgraph/src/commands/CreateFlowTemplateCommand.ts index 71424016671da..9548bda7b8556 100644 --- a/clients/client-iotthingsgraph/src/commands/CreateFlowTemplateCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/CreateFlowTemplateCommand.ts @@ -49,7 +49,7 @@ export interface CreateFlowTemplateCommandOutput extends CreateFlowTemplateRespo * const client = new IoTThingsGraphClient(config); * const input = { // CreateFlowTemplateRequest * definition: { // DefinitionDocument - * language: "STRING_VALUE", // required + * language: "GRAPHQL", // required * text: "STRING_VALUE", // required * }, * compatibleNamespaceVersion: Number("long"), diff --git a/clients/client-iotthingsgraph/src/commands/CreateSystemInstanceCommand.ts b/clients/client-iotthingsgraph/src/commands/CreateSystemInstanceCommand.ts index fc7d0eebcb115..fea02ec411eab 100644 --- a/clients/client-iotthingsgraph/src/commands/CreateSystemInstanceCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/CreateSystemInstanceCommand.ts @@ -61,10 +61,10 @@ export interface CreateSystemInstanceCommandOutput extends CreateSystemInstanceR * }, * ], * definition: { // DefinitionDocument - * language: "STRING_VALUE", // required + * language: "GRAPHQL", // required * text: "STRING_VALUE", // required * }, - * target: "STRING_VALUE", // required + * target: "GREENGRASS" || "CLOUD", // required * greengrassGroupName: "STRING_VALUE", * s3BucketName: "STRING_VALUE", * metricsConfiguration: { // MetricsConfiguration @@ -79,8 +79,8 @@ export interface CreateSystemInstanceCommandOutput extends CreateSystemInstanceR * // summary: { // SystemInstanceSummary * // id: "STRING_VALUE", * // arn: "STRING_VALUE", - * // status: "STRING_VALUE", - * // target: "STRING_VALUE", + * // status: "NOT_DEPLOYED" || "BOOTSTRAP" || "DEPLOY_IN_PROGRESS" || "DEPLOYED_IN_TARGET" || "UNDEPLOY_IN_PROGRESS" || "FAILED" || "PENDING_DELETE" || "DELETED_IN_TARGET", + * // target: "GREENGRASS" || "CLOUD", * // greengrassGroupName: "STRING_VALUE", * // createdAt: new Date("TIMESTAMP"), * // updatedAt: new Date("TIMESTAMP"), diff --git a/clients/client-iotthingsgraph/src/commands/CreateSystemTemplateCommand.ts b/clients/client-iotthingsgraph/src/commands/CreateSystemTemplateCommand.ts index dfa1669a2c2b0..c641ab9629119 100644 --- a/clients/client-iotthingsgraph/src/commands/CreateSystemTemplateCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/CreateSystemTemplateCommand.ts @@ -48,7 +48,7 @@ export interface CreateSystemTemplateCommandOutput extends CreateSystemTemplateR * const client = new IoTThingsGraphClient(config); * const input = { // CreateSystemTemplateRequest * definition: { // DefinitionDocument - * language: "STRING_VALUE", // required + * language: "GRAPHQL", // required * text: "STRING_VALUE", // required * }, * compatibleNamespaceVersion: Number("long"), diff --git a/clients/client-iotthingsgraph/src/commands/DeploySystemInstanceCommand.ts b/clients/client-iotthingsgraph/src/commands/DeploySystemInstanceCommand.ts index d64538c60cc5c..d729635244b0f 100644 --- a/clients/client-iotthingsgraph/src/commands/DeploySystemInstanceCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/DeploySystemInstanceCommand.ts @@ -65,8 +65,8 @@ export interface DeploySystemInstanceCommandOutput extends DeploySystemInstanceR * // summary: { // SystemInstanceSummary * // id: "STRING_VALUE", * // arn: 
"STRING_VALUE", - * // status: "STRING_VALUE", - * // target: "STRING_VALUE", + * // status: "NOT_DEPLOYED" || "BOOTSTRAP" || "DEPLOY_IN_PROGRESS" || "DEPLOYED_IN_TARGET" || "UNDEPLOY_IN_PROGRESS" || "FAILED" || "PENDING_DELETE" || "DELETED_IN_TARGET", + * // target: "GREENGRASS" || "CLOUD", * // greengrassGroupName: "STRING_VALUE", * // createdAt: new Date("TIMESTAMP"), * // updatedAt: new Date("TIMESTAMP"), diff --git a/clients/client-iotthingsgraph/src/commands/DissociateEntityFromThingCommand.ts b/clients/client-iotthingsgraph/src/commands/DissociateEntityFromThingCommand.ts index 5201acf00c2e4..cdddcf618d8f0 100644 --- a/clients/client-iotthingsgraph/src/commands/DissociateEntityFromThingCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/DissociateEntityFromThingCommand.ts @@ -48,7 +48,7 @@ export interface DissociateEntityFromThingCommandOutput extends DissociateEntity * const client = new IoTThingsGraphClient(config); * const input = { // DissociateEntityFromThingRequest * thingName: "STRING_VALUE", // required - * entityType: "STRING_VALUE", // required + * entityType: "DEVICE" || "SERVICE" || "DEVICE_MODEL" || "CAPABILITY" || "STATE" || "ACTION" || "EVENT" || "PROPERTY" || "MAPPING" || "ENUM", // required * }; * const command = new DissociateEntityFromThingCommand(input); * const response = await client.send(command); diff --git a/clients/client-iotthingsgraph/src/commands/GetEntitiesCommand.ts b/clients/client-iotthingsgraph/src/commands/GetEntitiesCommand.ts index b59f564653607..befdce7fd892b 100644 --- a/clients/client-iotthingsgraph/src/commands/GetEntitiesCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/GetEntitiesCommand.ts @@ -89,10 +89,10 @@ export interface GetEntitiesCommandOutput extends GetEntitiesResponse, __Metadat * // { // EntityDescription * // id: "STRING_VALUE", * // arn: "STRING_VALUE", - * // type: "STRING_VALUE", + * // type: "DEVICE" || "SERVICE" || "DEVICE_MODEL" || "CAPABILITY" || "STATE" || "ACTION" || "EVENT" || "PROPERTY" || "MAPPING" || "ENUM", * // createdAt: new Date("TIMESTAMP"), * // definition: { // DefinitionDocument - * // language: "STRING_VALUE", // required + * // language: "GRAPHQL", // required * // text: "STRING_VALUE", // required * // }, * // }, diff --git a/clients/client-iotthingsgraph/src/commands/GetFlowTemplateCommand.ts b/clients/client-iotthingsgraph/src/commands/GetFlowTemplateCommand.ts index e030933bb866e..9fd8845ae5af5 100644 --- a/clients/client-iotthingsgraph/src/commands/GetFlowTemplateCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/GetFlowTemplateCommand.ts @@ -60,7 +60,7 @@ export interface GetFlowTemplateCommandOutput extends GetFlowTemplateResponse, _ * // createdAt: new Date("TIMESTAMP"), * // }, * // definition: { // DefinitionDocument - * // language: "STRING_VALUE", // required + * // language: "GRAPHQL", // required * // text: "STRING_VALUE", // required * // }, * // validatedNamespaceVersion: Number("long"), diff --git a/clients/client-iotthingsgraph/src/commands/GetNamespaceDeletionStatusCommand.ts b/clients/client-iotthingsgraph/src/commands/GetNamespaceDeletionStatusCommand.ts index e4ec347196bb5..87111832236bf 100644 --- a/clients/client-iotthingsgraph/src/commands/GetNamespaceDeletionStatusCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/GetNamespaceDeletionStatusCommand.ts @@ -51,8 +51,8 @@ export interface GetNamespaceDeletionStatusCommandOutput extends GetNamespaceDel * // { // GetNamespaceDeletionStatusResponse * // namespaceArn: "STRING_VALUE", * // 
namespaceName: "STRING_VALUE", - * // status: "STRING_VALUE", - * // errorCode: "STRING_VALUE", + * // status: "IN_PROGRESS" || "SUCCEEDED" || "FAILED", + * // errorCode: "VALIDATION_FAILED", * // errorMessage: "STRING_VALUE", * // }; * diff --git a/clients/client-iotthingsgraph/src/commands/GetSystemInstanceCommand.ts b/clients/client-iotthingsgraph/src/commands/GetSystemInstanceCommand.ts index 88c6ad3ee2a09..f5af376dc449c 100644 --- a/clients/client-iotthingsgraph/src/commands/GetSystemInstanceCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/GetSystemInstanceCommand.ts @@ -55,8 +55,8 @@ export interface GetSystemInstanceCommandOutput extends GetSystemInstanceRespons * // summary: { // SystemInstanceSummary * // id: "STRING_VALUE", * // arn: "STRING_VALUE", - * // status: "STRING_VALUE", - * // target: "STRING_VALUE", + * // status: "NOT_DEPLOYED" || "BOOTSTRAP" || "DEPLOY_IN_PROGRESS" || "DEPLOYED_IN_TARGET" || "UNDEPLOY_IN_PROGRESS" || "FAILED" || "PENDING_DELETE" || "DELETED_IN_TARGET", + * // target: "GREENGRASS" || "CLOUD", * // greengrassGroupName: "STRING_VALUE", * // createdAt: new Date("TIMESTAMP"), * // updatedAt: new Date("TIMESTAMP"), @@ -64,7 +64,7 @@ export interface GetSystemInstanceCommandOutput extends GetSystemInstanceRespons * // greengrassGroupVersionId: "STRING_VALUE", * // }, * // definition: { // DefinitionDocument - * // language: "STRING_VALUE", // required + * // language: "GRAPHQL", // required * // text: "STRING_VALUE", // required * // }, * // s3BucketName: "STRING_VALUE", diff --git a/clients/client-iotthingsgraph/src/commands/GetSystemTemplateCommand.ts b/clients/client-iotthingsgraph/src/commands/GetSystemTemplateCommand.ts index 843ea013ee5b4..211c9c27145f9 100644 --- a/clients/client-iotthingsgraph/src/commands/GetSystemTemplateCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/GetSystemTemplateCommand.ts @@ -60,7 +60,7 @@ export interface GetSystemTemplateCommandOutput extends GetSystemTemplateRespons * // createdAt: new Date("TIMESTAMP"), * // }, * // definition: { // DefinitionDocument - * // language: "STRING_VALUE", // required + * // language: "GRAPHQL", // required * // text: "STRING_VALUE", // required * // }, * // validatedNamespaceVersion: Number("long"), diff --git a/clients/client-iotthingsgraph/src/commands/GetUploadStatusCommand.ts b/clients/client-iotthingsgraph/src/commands/GetUploadStatusCommand.ts index 53123bbf912e9..df0ef2cd5f642 100644 --- a/clients/client-iotthingsgraph/src/commands/GetUploadStatusCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/GetUploadStatusCommand.ts @@ -52,7 +52,7 @@ export interface GetUploadStatusCommandOutput extends GetUploadStatusResponse, _ * const response = await client.send(command); * // { // GetUploadStatusResponse * // uploadId: "STRING_VALUE", // required - * // uploadStatus: "STRING_VALUE", // required + * // uploadStatus: "IN_PROGRESS" || "SUCCEEDED" || "FAILED", // required * // namespaceArn: "STRING_VALUE", * // namespaceName: "STRING_VALUE", * // namespaceVersion: Number("long"), diff --git a/clients/client-iotthingsgraph/src/commands/ListFlowExecutionMessagesCommand.ts b/clients/client-iotthingsgraph/src/commands/ListFlowExecutionMessagesCommand.ts index 5a4a53822af42..9abf81ab9e838 100644 --- a/clients/client-iotthingsgraph/src/commands/ListFlowExecutionMessagesCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/ListFlowExecutionMessagesCommand.ts @@ -56,7 +56,7 @@ export interface ListFlowExecutionMessagesCommandOutput extends ListFlowExecutio * // 
messages: [ // FlowExecutionMessages * // { // FlowExecutionMessage * // messageId: "STRING_VALUE", - * // eventType: "STRING_VALUE", + * // eventType: "EXECUTION_STARTED" || "EXECUTION_FAILED" || "EXECUTION_ABORTED" || "EXECUTION_SUCCEEDED" || "STEP_STARTED" || "STEP_FAILED" || "STEP_SUCCEEDED" || "ACTIVITY_SCHEDULED" || "ACTIVITY_STARTED" || "ACTIVITY_FAILED" || "ACTIVITY_SUCCEEDED" || "START_FLOW_EXECUTION_TASK" || "SCHEDULE_NEXT_READY_STEPS_TASK" || "THING_ACTION_TASK" || "THING_ACTION_TASK_FAILED" || "THING_ACTION_TASK_SUCCEEDED" || "ACKNOWLEDGE_TASK_MESSAGE", * // timestamp: new Date("TIMESTAMP"), * // payload: "STRING_VALUE", * // }, diff --git a/clients/client-iotthingsgraph/src/commands/SearchEntitiesCommand.ts b/clients/client-iotthingsgraph/src/commands/SearchEntitiesCommand.ts index b5a29114510b1..0a6dd6c2550a3 100644 --- a/clients/client-iotthingsgraph/src/commands/SearchEntitiesCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/SearchEntitiesCommand.ts @@ -47,11 +47,11 @@ export interface SearchEntitiesCommandOutput extends SearchEntitiesResponse, __M * const client = new IoTThingsGraphClient(config); * const input = { // SearchEntitiesRequest * entityTypes: [ // EntityTypes // required - * "STRING_VALUE", + * "DEVICE" || "SERVICE" || "DEVICE_MODEL" || "CAPABILITY" || "STATE" || "ACTION" || "EVENT" || "PROPERTY" || "MAPPING" || "ENUM", * ], * filters: [ // EntityFilters * { // EntityFilter - * name: "STRING_VALUE", + * name: "NAME" || "NAMESPACE" || "SEMANTIC_TYPE_PATH" || "REFERENCED_ENTITY_ID", * value: [ // EntityFilterValues * "STRING_VALUE", * ], @@ -68,10 +68,10 @@ export interface SearchEntitiesCommandOutput extends SearchEntitiesResponse, __M * // { // EntityDescription * // id: "STRING_VALUE", * // arn: "STRING_VALUE", - * // type: "STRING_VALUE", + * // type: "DEVICE" || "SERVICE" || "DEVICE_MODEL" || "CAPABILITY" || "STATE" || "ACTION" || "EVENT" || "PROPERTY" || "MAPPING" || "ENUM", * // createdAt: new Date("TIMESTAMP"), * // definition: { // DefinitionDocument - * // language: "STRING_VALUE", // required + * // language: "GRAPHQL", // required * // text: "STRING_VALUE", // required * // }, * // }, diff --git a/clients/client-iotthingsgraph/src/commands/SearchFlowExecutionsCommand.ts b/clients/client-iotthingsgraph/src/commands/SearchFlowExecutionsCommand.ts index ced3041cb5620..92665e70e1c71 100644 --- a/clients/client-iotthingsgraph/src/commands/SearchFlowExecutionsCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/SearchFlowExecutionsCommand.ts @@ -59,7 +59,7 @@ export interface SearchFlowExecutionsCommandOutput extends SearchFlowExecutionsR * // summaries: [ // FlowExecutionSummaries * // { // FlowExecutionSummary * // flowExecutionId: "STRING_VALUE", - * // status: "STRING_VALUE", + * // status: "RUNNING" || "ABORTED" || "SUCCEEDED" || "FAILED", * // systemInstanceId: "STRING_VALUE", * // flowTemplateId: "STRING_VALUE", * // createdAt: new Date("TIMESTAMP"), diff --git a/clients/client-iotthingsgraph/src/commands/SearchFlowTemplatesCommand.ts b/clients/client-iotthingsgraph/src/commands/SearchFlowTemplatesCommand.ts index aa201b43b11a0..de05942c9d3f4 100644 --- a/clients/client-iotthingsgraph/src/commands/SearchFlowTemplatesCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/SearchFlowTemplatesCommand.ts @@ -48,7 +48,7 @@ export interface SearchFlowTemplatesCommandOutput extends SearchFlowTemplatesRes * const input = { // SearchFlowTemplatesRequest * filters: [ // FlowTemplateFilters * { // FlowTemplateFilter - * name: 
"STRING_VALUE", // required + * name: "DEVICE_MODEL_ID", // required * value: [ // FlowTemplateFilterValues // required * "STRING_VALUE", * ], diff --git a/clients/client-iotthingsgraph/src/commands/SearchSystemInstancesCommand.ts b/clients/client-iotthingsgraph/src/commands/SearchSystemInstancesCommand.ts index 53d6e76f3b224..cbe8f297045c3 100644 --- a/clients/client-iotthingsgraph/src/commands/SearchSystemInstancesCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/SearchSystemInstancesCommand.ts @@ -48,7 +48,7 @@ export interface SearchSystemInstancesCommandOutput extends SearchSystemInstance * const input = { // SearchSystemInstancesRequest * filters: [ // SystemInstanceFilters * { // SystemInstanceFilter - * name: "STRING_VALUE", + * name: "SYSTEM_TEMPLATE_ID" || "STATUS" || "GREENGRASS_GROUP_NAME", * value: [ // SystemInstanceFilterValues * "STRING_VALUE", * ], @@ -64,8 +64,8 @@ export interface SearchSystemInstancesCommandOutput extends SearchSystemInstance * // { // SystemInstanceSummary * // id: "STRING_VALUE", * // arn: "STRING_VALUE", - * // status: "STRING_VALUE", - * // target: "STRING_VALUE", + * // status: "NOT_DEPLOYED" || "BOOTSTRAP" || "DEPLOY_IN_PROGRESS" || "DEPLOYED_IN_TARGET" || "UNDEPLOY_IN_PROGRESS" || "FAILED" || "PENDING_DELETE" || "DELETED_IN_TARGET", + * // target: "GREENGRASS" || "CLOUD", * // greengrassGroupName: "STRING_VALUE", * // createdAt: new Date("TIMESTAMP"), * // updatedAt: new Date("TIMESTAMP"), diff --git a/clients/client-iotthingsgraph/src/commands/SearchSystemTemplatesCommand.ts b/clients/client-iotthingsgraph/src/commands/SearchSystemTemplatesCommand.ts index 2ee45479a3300..b26a9a040f919 100644 --- a/clients/client-iotthingsgraph/src/commands/SearchSystemTemplatesCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/SearchSystemTemplatesCommand.ts @@ -48,7 +48,7 @@ export interface SearchSystemTemplatesCommandOutput extends SearchSystemTemplate * const input = { // SearchSystemTemplatesRequest * filters: [ // SystemTemplateFilters * { // SystemTemplateFilter - * name: "STRING_VALUE", // required + * name: "FLOW_TEMPLATE_ID", // required * value: [ // SystemTemplateFilterValues // required * "STRING_VALUE", * ], diff --git a/clients/client-iotthingsgraph/src/commands/UndeploySystemInstanceCommand.ts b/clients/client-iotthingsgraph/src/commands/UndeploySystemInstanceCommand.ts index 892016f610232..5183d0673b63b 100644 --- a/clients/client-iotthingsgraph/src/commands/UndeploySystemInstanceCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/UndeploySystemInstanceCommand.ts @@ -54,8 +54,8 @@ export interface UndeploySystemInstanceCommandOutput extends UndeploySystemInsta * // summary: { // SystemInstanceSummary * // id: "STRING_VALUE", * // arn: "STRING_VALUE", - * // status: "STRING_VALUE", - * // target: "STRING_VALUE", + * // status: "NOT_DEPLOYED" || "BOOTSTRAP" || "DEPLOY_IN_PROGRESS" || "DEPLOYED_IN_TARGET" || "UNDEPLOY_IN_PROGRESS" || "FAILED" || "PENDING_DELETE" || "DELETED_IN_TARGET", + * // target: "GREENGRASS" || "CLOUD", * // greengrassGroupName: "STRING_VALUE", * // createdAt: new Date("TIMESTAMP"), * // updatedAt: new Date("TIMESTAMP"), diff --git a/clients/client-iotthingsgraph/src/commands/UpdateFlowTemplateCommand.ts b/clients/client-iotthingsgraph/src/commands/UpdateFlowTemplateCommand.ts index 49441b40cda28..1d68075d9fd7e 100644 --- a/clients/client-iotthingsgraph/src/commands/UpdateFlowTemplateCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/UpdateFlowTemplateCommand.ts @@ -49,7 +49,7 @@ export 
interface UpdateFlowTemplateCommandOutput extends UpdateFlowTemplateRespo * const input = { // UpdateFlowTemplateRequest * id: "STRING_VALUE", // required * definition: { // DefinitionDocument - * language: "STRING_VALUE", // required + * language: "GRAPHQL", // required * text: "STRING_VALUE", // required * }, * compatibleNamespaceVersion: Number("long"), diff --git a/clients/client-iotthingsgraph/src/commands/UpdateSystemTemplateCommand.ts b/clients/client-iotthingsgraph/src/commands/UpdateSystemTemplateCommand.ts index df8d7683c4fbf..040bf83f3294d 100644 --- a/clients/client-iotthingsgraph/src/commands/UpdateSystemTemplateCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/UpdateSystemTemplateCommand.ts @@ -48,7 +48,7 @@ export interface UpdateSystemTemplateCommandOutput extends UpdateSystemTemplateR * const input = { // UpdateSystemTemplateRequest * id: "STRING_VALUE", // required * definition: { // DefinitionDocument - * language: "STRING_VALUE", // required + * language: "GRAPHQL", // required * text: "STRING_VALUE", // required * }, * compatibleNamespaceVersion: Number("long"), diff --git a/clients/client-iotthingsgraph/src/commands/UploadEntityDefinitionsCommand.ts b/clients/client-iotthingsgraph/src/commands/UploadEntityDefinitionsCommand.ts index 450bc55c8ace8..14ac427f668a0 100644 --- a/clients/client-iotthingsgraph/src/commands/UploadEntityDefinitionsCommand.ts +++ b/clients/client-iotthingsgraph/src/commands/UploadEntityDefinitionsCommand.ts @@ -57,7 +57,7 @@ export interface UploadEntityDefinitionsCommandOutput extends UploadEntityDefini * const client = new IoTThingsGraphClient(config); * const input = { // UploadEntityDefinitionsRequest * document: { // DefinitionDocument - * language: "STRING_VALUE", // required + * language: "GRAPHQL", // required * text: "STRING_VALUE", // required * }, * syncWithPublicNamespace: true || false, diff --git a/clients/client-iotthingsgraph/src/endpoint/ruleset.ts b/clients/client-iotthingsgraph/src/endpoint/ruleset.ts index 08e8c0e5f968b..28167deddbbfa 100644 --- a/clients/client-iotthingsgraph/src/endpoint/ruleset.ts +++ b/clients/client-iotthingsgraph/src/endpoint/ruleset.ts @@ -6,24 +6,27 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/iotthingsgraph.json */ -const p="required", -q="fn", -r="argv", -s="ref"; -const a="PartitionResult", +const s="required", +t="fn", +u="argv", +v="ref"; +const a="isSet", b="tree", c="error", d="endpoint", -e={[p]:false,"type":"String"}, -f={[p]:true,"default":false,"type":"Boolean"}, -g={[s]:"Endpoint"}, -h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]}, -i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]}, -j={}, -k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]}, -l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]}, -m=[g], -n=[h], -o=[i]; -const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not 
supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://iotthingsgraph-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iotthingsgraph-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://iotthingsgraph.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://iotthingsgraph.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]}; +e="PartitionResult", +f="getAttr", +g={[s]:false,"type":"String"}, +h={[s]:true,"default":false,"type":"Boolean"}, +i={[v]:"Endpoint"}, +j={[t]:"booleanEquals",[u]:[{[v]:"UseFIPS"},true]}, +k={[t]:"booleanEquals",[u]:[{[v]:"UseDualStack"},true]}, +l={}, +m={[t]:"booleanEquals",[u]:[true,{[t]:f,[u]:[{[v]:e},"supportsFIPS"]}]}, +n={[v]:e}, +o={[t]:"booleanEquals",[u]:[true,{[t]:f,[u]:[n,"supportsDualStack"]}]}, +p=[j], +q=[k], +r=[{[v]:"Region"}]; +const _data={version:"1.0",parameters:{Region:g,UseDualStack:h,UseFIPS:h,Endpoint:g},rules:[{conditions:[{[t]:a,[u]:[i]}],type:b,rules:[{conditions:p,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:q,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:i,properties:l,headers:l},type:d}]},{conditions:[{[t]:a,[u]:r}],type:b,rules:[{conditions:[{[t]:"aws.partition",[u]:r,assign:e}],type:b,rules:[{conditions:[j,k],type:b,rules:[{conditions:[m,o],type:b,rules:[{endpoint:{url:"https://iotthingsgraph-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:p,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://iotthingsgraph-fips.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:q,type:b,rules:[{conditions:[o],type:b,rules:[{endpoint:{url:"https://iotthingsgraph.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{conditions:[{[t]:"stringEquals",[u]:["aws",{[t]:f,[u]:[n,"name"]}]}],endpoint:{url:"https://iotthingsgraph.{Region}.amazonaws.com",properties:l,headers:l},type:d},{endpoint:{url:"https://iotthingsgraph.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-iotthingsgraph/src/index.ts b/clients/client-iotthingsgraph/src/index.ts index 9b2fbad476cfa..6253d61723e4f 100644 --- a/clients/client-iotthingsgraph/src/index.ts +++ b/clients/client-iotthingsgraph/src/index.ts @@ -6,7 +6,6 @@ * such as units of measure and communication protocols. 
AWS IoT Things Graph makes it possible to build IoT applications with little to no code by connecting devices and services * and defining how they interact at an abstract level. *For more information about how AWS IoT Things Graph works, see the User Guide.
- * *The AWS IoT Things Graph service is discontinued.
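Even though the service is discontinued, the tightened example docs above make the wire shapes explicit: DefinitionDocument.language admits only "GRAPHQL", and the upload status is a three-value union. A hedged sketch of the upload-and-poll flow those docs describe; region and definition text are placeholders:

import {
  IoTThingsGraphClient,
  UploadEntityDefinitionsCommand,
  GetUploadStatusCommand,
} from "@aws-sdk/client-iotthingsgraph";

const client = new IoTThingsGraphClient({ region: "us-west-2" });

const { uploadId } = await client.send(
  new UploadEntityDefinitionsCommand({
    document: { language: "GRAPHQL", text: "..." }, // definition text elided
    syncWithPublicNamespace: false,
  })
);

const { uploadStatus } = await client.send(
  new GetUploadStatusCommand({ uploadId: uploadId! })
);
// uploadStatus narrows to "IN_PROGRESS" | "SUCCEEDED" | "FAILED"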
* * @packageDocumentation diff --git a/clients/client-iottwinmaker/src/endpoint/ruleset.ts b/clients/client-iottwinmaker/src/endpoint/ruleset.ts index 610eb88f939fe..489e1e9a59b0f 100644 --- a/clients/client-iottwinmaker/src/endpoint/ruleset.ts +++ b/clients/client-iottwinmaker/src/endpoint/ruleset.ts @@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack" n=[i], o=[j], p=[{[t]:"Region"}]; -const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iottwinmaker-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iottwinmaker-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://iottwinmaker.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://iottwinmaker.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]}; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://iottwinmaker-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://iottwinmaker-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://iottwinmaker.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://iottwinmaker.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git 
a/clients/client-ivschat/src/endpoint/ruleset.ts b/clients/client-ivschat/src/endpoint/ruleset.ts index 55f3737866aa8..0afae97b80a9d 100644 --- a/clients/client-ivschat/src/endpoint/ruleset.ts +++ b/clients/client-ivschat/src/endpoint/ruleset.ts @@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack" n=[i], o=[j], p=[{[t]:"Region"}]; -const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://ivschat-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://ivschat-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://ivschat.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://ivschat.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]}; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://ivschat-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://ivschat-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://ivschat.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://ivschat.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-kafka/src/endpoint/ruleset.ts b/clients/client-kafka/src/endpoint/ruleset.ts index 1526f7330d7ac..0e6c83cc536fc 100644 --- 
a/clients/client-kafka/src/endpoint/ruleset.ts +++ b/clients/client-kafka/src/endpoint/ruleset.ts @@ -28,5 +28,5 @@ o={[t]:"booleanEquals",[u]:[true,{[t]:f,[u]:[n,"supportsDualStack"]}]}, p=[j], q=[k], r=[{[v]:"Region"}]; -const _data={version:"1.0",parameters:{Region:g,UseDualStack:h,UseFIPS:h,Endpoint:g},rules:[{conditions:[{[t]:a,[u]:[i]}],type:b,rules:[{conditions:p,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:q,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:i,properties:l,headers:l},type:d}]}]},{type:b,rules:[{conditions:[{[t]:a,[u]:r}],type:b,rules:[{conditions:[{[t]:"aws.partition",[u]:r,assign:e}],type:b,rules:[{conditions:[j,k],type:b,rules:[{conditions:[m,o],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://kafka-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:p,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{conditions:[{[t]:"stringEquals",[u]:["aws-us-gov",{[t]:f,[u]:[n,"name"]}]}],endpoint:{url:"https://kafka.{Region}.amazonaws.com",properties:l,headers:l},type:d},{endpoint:{url:"https://kafka-fips.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:q,type:b,rules:[{conditions:[o],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://kafka.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://kafka.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]}; +const _data={version:"1.0",parameters:{Region:g,UseDualStack:h,UseFIPS:h,Endpoint:g},rules:[{conditions:[{[t]:a,[u]:[i]}],type:b,rules:[{conditions:p,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:q,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:i,properties:l,headers:l},type:d}]},{conditions:[{[t]:a,[u]:r}],type:b,rules:[{conditions:[{[t]:"aws.partition",[u]:r,assign:e}],type:b,rules:[{conditions:[j,k],type:b,rules:[{conditions:[m,o],type:b,rules:[{endpoint:{url:"https://kafka-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:p,type:b,rules:[{conditions:[m],type:b,rules:[{conditions:[{[t]:"stringEquals",[u]:["aws-us-gov",{[t]:f,[u]:[n,"name"]}]}],endpoint:{url:"https://kafka.{Region}.amazonaws.com",properties:l,headers:l},type:d},{endpoint:{url:"https://kafka-fips.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:q,type:b,rules:[{conditions:[o],type:b,rules:[{endpoint:{url:"https://kafka.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://kafka.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; 
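Note that the kafka ruleset keeps its partition special case through the cleanup: with UseFIPS enabled in the aws-us-gov partition, the stringEquals branch above resolves to kafka.{Region}.amazonaws.com rather than a kafka-fips host, presumably because GovCloud endpoints are FIPS-compliant by default. A minimal sketch, with an illustrative region:

import { KafkaClient } from "@aws-sdk/client-kafka";

// Resolves to https://kafka.us-gov-west-1.amazonaws.com via the
// "aws-us-gov" stringEquals rule, not a -fips hostname.
const client = new KafkaClient({
  region: "us-gov-west-1",
  useFipsEndpoint: true,
});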
diff --git a/clients/client-kendra-ranking/src/endpoint/EndpointParameters.ts b/clients/client-kendra-ranking/src/endpoint/EndpointParameters.ts index a58911edc7ff2..78086eb303764 100644 --- a/clients/client-kendra-ranking/src/endpoint/EndpointParameters.ts +++ b/clients/client-kendra-ranking/src/endpoint/EndpointParameters.ts @@ -25,7 +25,7 @@ export const resolveClientEndpointParameters =CreateSlotTypeVersion
operation.
*
- *
* When you create a version of a slot type, Amazon Lex sets the version to * 1. Subsequent versions increment by 1. For more information, see versioning-intro.
- * *This operation requires permissions for the
* lex:CreateSlotTypeVersion
action.
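A hedged sketch of the versioning behavior just described; the slot type name is a placeholder, and checksum, when supplied, must match the $LATEST version:

import {
  LexModelBuildingServiceClient,
  CreateSlotTypeVersionCommand,
} from "@aws-sdk/client-lex-model-building-service";

const lex = new LexModelBuildingServiceClient({ region: "us-east-1" });

const { version } = await lex.send(
  new CreateSlotTypeVersionCommand({ name: "PizzaSizeType" })
);
// First version is "1"; each subsequent call increments by 1.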
DeleteBot
operation is
* successful.
- *
* This operation requires permissions for the
* lex:DeleteBot
action.
DeleteIntent
is successful.
*
- *
* This operation requires permission for the
* lex:DeleteIntent
action.
For a list of built-in slot types, see Slot Type Reference in the Alexa Skills * Kit.
- * *This operation requires permission for the
* lex:GetBuiltInSlotTypes
action.
privacySettings
fields, which are set to their default
* values. If you don't specify values for required fields, Amazon Lex throws an
* exception.
- *
* This operation requires permissions for the lex:PutBot
* action. For more information, see security-iam.
You can specify other optional information in the request, such * as:
- * *A confirmation prompt to ask the user to confirm an intent. For
@@ -111,13 +110,13 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* { // Slot
* name: "STRING_VALUE", // required
* description: "STRING_VALUE",
- * slotConstraint: "STRING_VALUE", // required
+ * slotConstraint: "Required" || "Optional", // required
* slotType: "STRING_VALUE",
* slotTypeVersion: "STRING_VALUE",
* valueElicitationPrompt: { // Prompt
* messages: [ // MessageList // required
* { // Message
- * contentType: "STRING_VALUE", // required
+ * contentType: "PlainText" || "SSML" || "CustomPayload", // required
* content: "STRING_VALUE", // required
* groupNumber: Number("int"),
* },
@@ -130,7 +129,7 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* "STRING_VALUE",
* ],
* responseCard: "STRING_VALUE",
- * obfuscationSetting: "STRING_VALUE",
+ * obfuscationSetting: "NONE" || "DEFAULT_OBFUSCATION",
* defaultValueSpec: { // SlotDefaultValueSpec
* defaultValueList: [ // SlotDefaultValueList // required
* { // SlotDefaultValue
@@ -146,7 +145,7 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* confirmationPrompt: {
* messages: [ // required
* {
- * contentType: "STRING_VALUE", // required
+ * contentType: "PlainText" || "SSML" || "CustomPayload", // required
* content: "STRING_VALUE", // required
* groupNumber: Number("int"),
* },
@@ -157,7 +156,7 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* rejectionStatement: { // Statement
* messages: [ // required
* {
- * contentType: "STRING_VALUE", // required
+ * contentType: "PlainText" || "SSML" || "CustomPayload", // required
* content: "STRING_VALUE", // required
* groupNumber: Number("int"),
* },
@@ -180,7 +179,7 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* messageVersion: "STRING_VALUE", // required
* },
* fulfillmentActivity: { // FulfillmentActivity
- * type: "STRING_VALUE", // required
+ * type: "ReturnIntent" || "CodeHook", // required
* codeHook: {
* uri: "STRING_VALUE", // required
* messageVersion: "STRING_VALUE", // required
@@ -216,13 +215,13 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* // { // Slot
* // name: "STRING_VALUE", // required
* // description: "STRING_VALUE",
- * // slotConstraint: "STRING_VALUE", // required
+ * // slotConstraint: "Required" || "Optional", // required
* // slotType: "STRING_VALUE",
* // slotTypeVersion: "STRING_VALUE",
* // valueElicitationPrompt: { // Prompt
* // messages: [ // MessageList // required
* // { // Message
- * // contentType: "STRING_VALUE", // required
+ * // contentType: "PlainText" || "SSML" || "CustomPayload", // required
* // content: "STRING_VALUE", // required
* // groupNumber: Number("int"),
* // },
@@ -235,7 +234,7 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* // "STRING_VALUE",
* // ],
* // responseCard: "STRING_VALUE",
- * // obfuscationSetting: "STRING_VALUE",
+ * // obfuscationSetting: "NONE" || "DEFAULT_OBFUSCATION",
* // defaultValueSpec: { // SlotDefaultValueSpec
* // defaultValueList: [ // SlotDefaultValueList // required
* // { // SlotDefaultValue
@@ -251,7 +250,7 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* // confirmationPrompt: {
* // messages: [ // required
* // {
- * // contentType: "STRING_VALUE", // required
+ * // contentType: "PlainText" || "SSML" || "CustomPayload", // required
* // content: "STRING_VALUE", // required
* // groupNumber: Number("int"),
* // },
@@ -262,7 +261,7 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* // rejectionStatement: { // Statement
* // messages: [ // required
* // {
- * // contentType: "STRING_VALUE", // required
+ * // contentType: "PlainText" || "SSML" || "CustomPayload", // required
* // content: "STRING_VALUE", // required
* // groupNumber: Number("int"),
* // },
@@ -285,7 +284,7 @@ export interface PutIntentCommandOutput extends PutIntentResponse, __MetadataBea
* // messageVersion: "STRING_VALUE", // required
* // },
* // fulfillmentActivity: { // FulfillmentActivity
- * // type: "STRING_VALUE", // required
+ * // type: "ReturnIntent" || "CodeHook", // required
* // codeHook: {
* // uri: "STRING_VALUE", // required
* // messageVersion: "STRING_VALUE", // required
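The literal unions documented above drop straight into a call. A minimal PutIntent sketch using them; intent, slot, and prompt values are placeholders:

import {
  LexModelBuildingServiceClient,
  PutIntentCommand,
} from "@aws-sdk/client-lex-model-building-service";

const lex = new LexModelBuildingServiceClient({ region: "us-east-1" });

await lex.send(
  new PutIntentCommand({
    name: "OrderPizza",
    sampleUtterances: ["I want a {PizzaSize} pizza"],
    slots: [
      {
        name: "PizzaSize",
        slotConstraint: "Required", // "Required" || "Optional"
        slotType: "PizzaSizeType",
        valueElicitationPrompt: {
          maxAttempts: 2,
          messages: [
            {
              contentType: "PlainText", // "PlainText" || "SSML" || "CustomPayload"
              content: "What size pizza would you like?",
            },
          ],
        },
      },
    ],
    fulfillmentActivity: { type: "ReturnIntent" }, // "ReturnIntent" || "CodeHook"
  })
);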
diff --git a/clients/client-lex-model-building-service/src/commands/PutSlotTypeCommand.ts b/clients/client-lex-model-building-service/src/commands/PutSlotTypeCommand.ts
index c8bc25bafbc35..2fca51dd62ddf 100644
--- a/clients/client-lex-model-building-service/src/commands/PutSlotTypeCommand.ts
+++ b/clients/client-lex-model-building-service/src/commands/PutSlotTypeCommand.ts
@@ -53,7 +53,6 @@ export interface PutSlotTypeCommandOutput extends PutSlotTypeResponse, __Metadat
* uses the $LATEST
version of an intent that contains the slot
* type, the bot's status
field is set to
* NOT_BUILT
.
This operation requires permissions for the
* lex:PutSlotType
action.
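And the matching slot type, per the note above that changing the $LATEST version of a slot type used by a bot's $LATEST intent flips that bot's status to NOT_BUILT; values here are placeholders:

import {
  LexModelBuildingServiceClient,
  PutSlotTypeCommand,
} from "@aws-sdk/client-lex-model-building-service";

const lex = new LexModelBuildingServiceClient({ region: "us-east-1" });

await lex.send(
  new PutSlotTypeCommand({
    name: "PizzaSizeType",
    enumerationValues: [
      { value: "small" },
      { value: "large", synonyms: ["family size"] },
    ],
    valueSelectionStrategy: "TOP_RESOLUTION", // or "ORIGINAL_VALUE"
  })
);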
Consider the following examples:
*Currently, Amazon Lex supports associations with Facebook and Slack, * and Twilio.
- * *Specifies the target locale for the bot. Any intent used in the * bot must be compatible with the locale of the bot.
- * *The default is en-US
.
An array of utterances (strings) that a user might say to signal * the intent. For example, "I want \{PizzaSize\} pizza", "Order \{Quantity\} * \{PizzaSize\} pizzas".
- * *In each utterance, a slot name is enclosed in curly braces. *
*/ @@ -4540,7 +4535,6 @@ export interface PutIntentRequest { * prompt again. *The followUpPrompt
field and the
* conclusionStatement
field are mutually exclusive. You can
* specify only one.
Aggregated details about the measures contributing to the anomaly group, and the measures * potentially impacted by the anomaly group.
- * + * */ export interface InterMetricImpactDetails { /** diff --git a/clients/client-lookoutvision/src/commands/CreateDatasetCommand.ts b/clients/client-lookoutvision/src/commands/CreateDatasetCommand.ts index 14493df078d32..2ad4281dd908b 100644 --- a/clients/client-lookoutvision/src/commands/CreateDatasetCommand.ts +++ b/clients/client-lookoutvision/src/commands/CreateDatasetCommand.ts @@ -40,11 +40,11 @@ export interface CreateDatasetCommandOutput extends CreateDatasetResponse, __Met * training or a test dataset from a valid dataset source (DatasetSource
).
* If you want a single dataset project, specify train
for the value of
* DatasetType
.
To have a project with separate training and test datasets, call CreateDataset
twice.
+ *
To have a project with separate training and test datasets, call CreateDataset
twice.
* On the first call, specify train
for the value of
* DatasetType
. On the second call, specify test
for the value of
* DatasetType
.
This operation requires permissions to perform the + *
This operation requires permissions to perform the
* lookoutvision:CreateDataset
operation.
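The two-call pattern described above, as a sketch; Lookout for Vision members are PascalCase, the project name is a placeholder, and omitting DatasetSource creates an empty dataset to populate later:

import {
  LookoutVisionClient,
  CreateDatasetCommand,
} from "@aws-sdk/client-lookoutvision";

const lfv = new LookoutVisionClient({ region: "us-east-1" });

for (const DatasetType of ["train", "test"] as const) {
  await lfv.send(
    new CreateDatasetCommand({ ProjectName: "my-project", DatasetType })
  );
}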
This operation requires permissions to perform the
* lookoutvision:DescribeModelPackagingJob
operation.
For more information, see * Using your Amazon Lookout for Vision model on an edge device in the Amazon Lookout for Vision Developer Guide.
* @example diff --git a/clients/client-lookoutvision/src/commands/DetectAnomaliesCommand.ts b/clients/client-lookoutvision/src/commands/DetectAnomaliesCommand.ts index 1e51aac825606..fd92728489d37 100644 --- a/clients/client-lookoutvision/src/commands/DetectAnomaliesCommand.ts +++ b/clients/client-lookoutvision/src/commands/DetectAnomaliesCommand.ts @@ -54,9 +54,7 @@ export interface DetectAnomaliesCommandOutput extends DetectAnomaliesResponse, _ * You are charged for the amount of time, in minutes, that a model runs and for the number of anomaly detection units that your * model uses. If you are not using a model, use the StopModel operation to stop your model. * - * *For more information, see Detecting anomalies in an image in the Amazon Lookout for Vision developer guide.
- * *This operation requires permissions to perform the
* lookoutvision:DetectAnomalies
operation.
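A hedged DetectAnomalies sketch matching the description above; the model must already be running via StartModel, and since running time is billed per minute it should be stopped with StopModel when idle. File and project names are placeholders:

import { readFile } from "node:fs/promises";
import {
  LookoutVisionClient,
  DetectAnomaliesCommand,
} from "@aws-sdk/client-lookoutvision";

const lfv = new LookoutVisionClient({ region: "us-east-1" });

const { DetectAnomalyResult } = await lfv.send(
  new DetectAnomaliesCommand({
    ProjectName: "my-project",
    ModelVersion: "1",
    ContentType: "image/jpeg", // must match the image bytes in Body
    Body: await readFile("part.jpg"),
  })
);
console.log(DetectAnomalyResult?.IsAnomalous, DetectAnomalyResult?.Confidence);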
This operation requires permissions to perform the
* lookoutvision:ListModelPackagingJobs
operation.
*
For more information, see * Using your Amazon Lookout for Vision model on an edge device in the Amazon Lookout for Vision Developer Guide.
* @example diff --git a/clients/client-lookoutvision/src/commands/StartModelPackagingJobCommand.ts b/clients/client-lookoutvision/src/commands/StartModelPackagingJobCommand.ts index a47b7f07d6d04..d223f1cf9d3f5 100644 --- a/clients/client-lookoutvision/src/commands/StartModelPackagingJobCommand.ts +++ b/clients/client-lookoutvision/src/commands/StartModelPackagingJobCommand.ts @@ -39,14 +39,12 @@ export interface StartModelPackagingJobCommandOutput extends StartModelPackaging *Starts an Amazon Lookout for Vision model packaging job. A model packaging job creates an AWS IoT Greengrass component for * a Lookout for Vision model. You can use the component to deploy your model to an edge device managed by Greengrass. *
- * *Use the DescribeModelPackagingJob API to determine the current status of the job.
*
* The model packaging job is complete if the value of Status
is SUCCEEDED
.
To deploy the component * to the target device, use the component name and component version * with the AWS IoT Greengrass CreateDeployment API.
- * *This operation requires the following permissions:
*(Optional) greengrass:TagResource
. Only required if you want to tag the component.
For more information, see * Using your Amazon Lookout for Vision model on an edge device in the Amazon Lookout for Vision Developer Guide.
 * @example
diff --git a/clients/client-lookoutvision/src/commands/UpdateDatasetEntriesCommand.ts b/clients/client-lookoutvision/src/commands/UpdateDatasetEntriesCommand.ts
index 7959ea3533263..f3c589fb6548c 100644
--- a/clients/client-lookoutvision/src/commands/UpdateDatasetEntriesCommand.ts
+++ b/clients/client-lookoutvision/src/commands/UpdateDatasetEntriesCommand.ts
@@ -41,16 +41,12 @@ export interface UpdateDatasetEntriesCommandOutput extends UpdateDatasetEntriesR
  * To update an existing JSON Line, use the source-ref field to identify the JSON Line. The JSON line
  * that you supply replaces the existing JSON line. Any existing annotations that are not in the new JSON line are removed from the dataset.
- *
  * For more information, see Defining JSON lines for anomaly classification in the Amazon Lookout for Vision Developer Guide.
- *
  * The images you reference in the source-ref field of a JSON line must be
  * in the same S3 bucket as the existing images in the dataset.
  * Updating a dataset might take a while to complete. To check the current status, call DescribeDataset and
  * check the Status field in the response.
  * This operation requires permissions to perform the
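A minimal sketch of the update flow described above, assuming a hypothetical project and S3 bucket:

```ts
import { LookoutVisionClient, UpdateDatasetEntriesCommand } from "@aws-sdk/client-lookoutvision";

const client = new LookoutVisionClient({ region: "us-east-1" });

// A JSON Line is matched by its source-ref; supplying the same source-ref replaces it.
const jsonLine = JSON.stringify({
  "source-ref": "s3://my-dataset-bucket/images/part-001.jpg", // must be in the dataset's bucket
  "anomaly-label": 1,
});

const { Status } = await client.send(
  new UpdateDatasetEntriesCommand({
    ProjectName: "my-project",
    DatasetType: "train",
    Changes: new TextEncoder().encode(jsonLine + "\n"), // one JSON Line per row
  })
);
console.log(Status); // the update is asynchronous; poll DescribeDataset for completion
```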
diff --git a/clients/client-lookoutvision/src/endpoint/EndpointParameters.ts b/clients/client-lookoutvision/src/endpoint/EndpointParameters.ts
index e95924afa105e..058b766e80b6e 100644
--- a/clients/client-lookoutvision/src/endpoint/EndpointParameters.ts
+++ b/clients/client-lookoutvision/src/endpoint/EndpointParameters.ts
@@ -27,7 +27,7 @@ export const resolveClientEndpointParameters =
 * If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you.
 * This prevents retries after a network error from starting multiple training jobs. You'll need to
 * provide your own value for other use cases.
+ * An error occurs if the other input parameters are not the same as in the first request. Using a different
 * value for ClientToken is considered a new call to CreateModel. An idempotency
 * token is active for 8 hours.
 * If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you.
 * This prevents retries after a network error from making multiple model deletion requests. You'll need to
 * provide your own value for other use cases.
 * An error occurs if the other input parameters are not the same as in the first request. Using a different
 * value for ClientToken is considered a new call to DeleteModel. An idempotency
 * token is active for 8 hours.
 * Additional compiler options for the Greengrass component. Currently,
 * only NVIDIA Graphics Processing Units (GPU) and CPU accelerators are supported.
 * If you specify TargetDevice, don't specify CompilerOptions.
 * For more information, see
 * Compiler options in the Amazon Lookout for Vision Developer Guide.
 * Amazon Macie Classic has been discontinued and is no longer available. A new Amazon Macie is now available with significant design improvements and additional
 * features, at a lower price and in most Amazon Web Services Regions. We encourage you to take advantage of the
 * new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Macie, see Amazon Macie. To learn how to use the new Macie, see the Amazon Macie User
diff --git a/clients/client-macie/src/Macie.ts b/clients/client-macie/src/Macie.ts
index b3f2d9fb16194..b6480c57dc393 100644
--- a/clients/client-macie/src/Macie.ts
+++ b/clients/client-macie/src/Macie.ts
@@ -171,7 +171,6 @@ export interface Macie {
* @public
* Amazon Macie Classic has been discontinued and is no longer available. A new Amazon Macie is now available with significant design improvements and additional
* features, at a lower price and in most Amazon Web Services Regions. We encourage you to take advantage of the
* new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Macie, see Amazon Macie. To learn how to use the new Macie, see the Amazon Macie User
diff --git a/clients/client-macie/src/MacieClient.ts b/clients/client-macie/src/MacieClient.ts
index 268697b3b5353..b193b358c5f7b 100644
--- a/clients/client-macie/src/MacieClient.ts
+++ b/clients/client-macie/src/MacieClient.ts
@@ -276,7 +276,6 @@ export interface MacieClientResolvedConfig extends MacieClientResolvedConfigType
* @public
* Amazon Macie Classic has been discontinued and is no longer available. A new Amazon Macie is now available with significant design improvements and additional
* features, at a lower price and in most Amazon Web Services Regions. We encourage you to take advantage of the
* new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Macie, see Amazon Macie. To learn how to use the new Macie, see the Amazon Macie User
diff --git a/clients/client-macie/src/commands/AssociateS3ResourcesCommand.ts b/clients/client-macie/src/commands/AssociateS3ResourcesCommand.ts
index c9848208d6f01..13a80788efa78 100644
--- a/clients/client-macie/src/commands/AssociateS3ResourcesCommand.ts
+++ b/clients/client-macie/src/commands/AssociateS3ResourcesCommand.ts
@@ -54,8 +54,8 @@ export interface AssociateS3ResourcesCommandOutput extends AssociateS3ResourcesR
* bucketName: "STRING_VALUE", // required
* prefix: "STRING_VALUE",
* classificationType: { // ClassificationType
- * oneTime: "STRING_VALUE", // required
- * continuous: "STRING_VALUE", // required
+ * oneTime: "FULL" || "NONE", // required
+ * continuous: "FULL", // required
* },
* },
* ],
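With the enum-typed shape shown in this hunk, TypeScript now rejects invalid classification values at compile time; a sketch (bucket name and region are illustrative):

```ts
import { MacieClient, AssociateS3ResourcesCommand } from "@aws-sdk/client-macie";

const client = new MacieClient({ region: "us-east-1" });

// oneTime accepts "FULL" | "NONE"; continuous accepts only "FULL".
await client.send(
  new AssociateS3ResourcesCommand({
    s3Resources: [
      {
        bucketName: "my-example-bucket", // hypothetical bucket
        prefix: "logs/",
        classificationType: { oneTime: "FULL", continuous: "FULL" },
      },
    ],
  })
);
```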
diff --git a/clients/client-macie/src/commands/ListS3ResourcesCommand.ts b/clients/client-macie/src/commands/ListS3ResourcesCommand.ts
index 68bf5dabe4f63..b57462a6a7d2d 100644
--- a/clients/client-macie/src/commands/ListS3ResourcesCommand.ts
+++ b/clients/client-macie/src/commands/ListS3ResourcesCommand.ts
@@ -60,8 +60,8 @@ export interface ListS3ResourcesCommandOutput extends ListS3ResourcesResult, __M
* // bucketName: "STRING_VALUE", // required
* // prefix: "STRING_VALUE",
* // classificationType: { // ClassificationType
- * // oneTime: "STRING_VALUE", // required
- * // continuous: "STRING_VALUE", // required
+ * // oneTime: "FULL" || "NONE", // required
+ * // continuous: "FULL", // required
* // },
* // },
* // ],
diff --git a/clients/client-macie/src/commands/UpdateS3ResourcesCommand.ts b/clients/client-macie/src/commands/UpdateS3ResourcesCommand.ts
index 161ab604a5609..e101b25bd01e6 100644
--- a/clients/client-macie/src/commands/UpdateS3ResourcesCommand.ts
+++ b/clients/client-macie/src/commands/UpdateS3ResourcesCommand.ts
@@ -54,8 +54,8 @@ export interface UpdateS3ResourcesCommandOutput extends UpdateS3ResourcesResult,
* bucketName: "STRING_VALUE", // required
* prefix: "STRING_VALUE",
* classificationTypeUpdate: { // ClassificationTypeUpdate
- * oneTime: "STRING_VALUE",
- * continuous: "STRING_VALUE",
+ * oneTime: "FULL" || "NONE",
+ * continuous: "FULL",
* },
* },
* ],
diff --git a/clients/client-macie/src/endpoint/ruleset.ts b/clients/client-macie/src/endpoint/ruleset.ts
index 3aedd46c000ac..1e87941400ad4 100644
--- a/clients/client-macie/src/endpoint/ruleset.ts
+++ b/clients/client-macie/src/endpoint/ruleset.ts
@@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/macie.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://macie-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://macie-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://macie.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://macie.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://macie-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://macie-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://macie.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://macie.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
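The regenerated ruleset checks a custom endpoint and the Region up front (the same restructuring recurs in the other rulesets in this patch). A sketch of the failure mode, assuming a hypothetical private endpoint; the error surfaces when a command is sent:

```ts
import { MacieClient, ListS3ResourcesCommand } from "@aws-sdk/client-macie";

// Combining a custom endpoint with FIPS (or dual-stack) is rejected, and
// omitting Region without an endpoint yields "Invalid Configuration: Missing Region".
const client = new MacieClient({
  endpoint: "https://macie.example.internal", // hypothetical endpoint
  useFipsEndpoint: true,
});

await client.send(new ListS3ResourcesCommand({})).catch((err) => {
  console.error(err.message); // e.g. "Invalid Configuration: FIPS and custom endpoint are not supported"
});
```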
diff --git a/clients/client-macie/src/index.ts b/clients/client-macie/src/index.ts
index 5d235e94b843a..797edca5208ec 100644
--- a/clients/client-macie/src/index.ts
+++ b/clients/client-macie/src/index.ts
@@ -3,7 +3,6 @@
/**
* Amazon Macie Classic has been discontinued and is no longer available. A new Amazon Macie is now available with significant design improvements and additional
* features, at a lower price and in most Amazon Web Services Regions. We encourage you to take advantage of the
* new and improved features, and benefit from the reduced cost. To learn about features and pricing for the new Macie, see Amazon Macie. To learn how to use the new Macie, see the Amazon Macie User
diff --git a/clients/client-macie2/src/endpoint/ruleset.ts b/clients/client-macie2/src/endpoint/ruleset.ts
index 78ad5098d1283..d701c6fa3374d 100644
--- a/clients/client-macie2/src/endpoint/ruleset.ts
+++ b/clients/client-macie2/src/endpoint/ruleset.ts
@@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"
n=[i],
o=[j],
p=[{[t]:"Region"}];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://macie2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://macie2-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://macie2.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://macie2.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]};
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://macie2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://macie2-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://macie2.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://macie2.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-managedblockchain/src/endpoint/ruleset.ts b/clients/client-managedblockchain/src/endpoint/ruleset.ts
index 7ffbf53632d12..3ca4a47f0cc51 100644
--- a/clients/client-managedblockchain/src/endpoint/ruleset.ts
+++ b/clients/client-managedblockchain/src/endpoint/ruleset.ts
@@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"
n=[i],
o=[j],
p=[{[t]:"Region"}];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://managedblockchain-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://managedblockchain-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://managedblockchain.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://managedblockchain.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]};
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://managedblockchain-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://managedblockchain-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://managedblockchain.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://managedblockchain.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-marketplace-catalog/src/endpoint/ruleset.ts b/clients/client-marketplace-catalog/src/endpoint/ruleset.ts
index da1cf58de9364..609ddde879ffc 100644
--- a/clients/client-marketplace-catalog/src/endpoint/ruleset.ts
+++ b/clients/client-marketplace-catalog/src/endpoint/ruleset.ts
@@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"
n=[i],
o=[j],
p=[{[t]:"Region"}];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://catalog.marketplace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://catalog.marketplace-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://catalog.marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://catalog.marketplace.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]};
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://catalog.marketplace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://catalog.marketplace-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://catalog.marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://catalog.marketplace.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-marketplace-commerce-analytics/src/commands/GenerateDataSetCommand.ts b/clients/client-marketplace-commerce-analytics/src/commands/GenerateDataSetCommand.ts
index 92e36e4c19db3..419b6ceae6192 100644
--- a/clients/client-marketplace-commerce-analytics/src/commands/GenerateDataSetCommand.ts
+++ b/clients/client-marketplace-commerce-analytics/src/commands/GenerateDataSetCommand.ts
@@ -55,7 +55,7 @@ export interface GenerateDataSetCommandOutput extends GenerateDataSetResult, __M
* // const { MarketplaceCommerceAnalyticsClient, GenerateDataSetCommand } = require("@aws-sdk/client-marketplace-commerce-analytics"); // CommonJS import
* const client = new MarketplaceCommerceAnalyticsClient(config);
* const input = { // GenerateDataSetRequest
- * dataSetType: "STRING_VALUE", // required
+ * dataSetType: "customer_subscriber_hourly_monthly_subscriptions" || "customer_subscriber_annual_subscriptions" || "daily_business_usage_by_instance_type" || "daily_business_fees" || "daily_business_free_trial_conversions" || "daily_business_new_instances" || "daily_business_new_product_subscribers" || "daily_business_canceled_product_subscribers" || "monthly_revenue_billing_and_revenue_data" || "monthly_revenue_annual_subscriptions" || "monthly_revenue_field_demonstration_usage" || "monthly_revenue_flexible_payment_schedule" || "disbursed_amount_by_product" || "disbursed_amount_by_product_with_uncollected_funds" || "disbursed_amount_by_instance_hours" || "disbursed_amount_by_customer_geo" || "disbursed_amount_by_age_of_uncollected_funds" || "disbursed_amount_by_age_of_disbursed_funds" || "disbursed_amount_by_age_of_past_due_funds" || "disbursed_amount_by_uncollected_funds_breakdown" || "customer_profile_by_industry" || "customer_profile_by_revenue" || "customer_profile_by_geography" || "sales_compensation_billed_revenue" || "us_sales_and_use_tax_records", // required
* dataSetPublicationDate: new Date("TIMESTAMP"), // required
* roleNameArn: "STRING_VALUE", // required
* destinationS3BucketName: "STRING_VALUE", // required
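A sketch of a request using one of the documented data set names; the role, bucket, and topic values are hypothetical:

```ts
import {
  MarketplaceCommerceAnalyticsClient,
  GenerateDataSetCommand,
} from "@aws-sdk/client-marketplace-commerce-analytics";

const client = new MarketplaceCommerceAnalyticsClient({ region: "us-east-1" });

// dataSetType is now a union of the documented names, so typos fail to compile.
const { dataSetRequestId } = await client.send(
  new GenerateDataSetCommand({
    dataSetType: "daily_business_fees",
    dataSetPublicationDate: new Date("2023-07-01"),
    roleNameArn: "arn:aws:iam::123456789012:role/MarketplaceAnalyticsRole", // hypothetical
    destinationS3BucketName: "my-analytics-bucket", // hypothetical
    snsTopicArn: "arn:aws:sns:us-east-1:123456789012:analytics-ready", // hypothetical
  })
);
console.log(dataSetRequestId);
```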
diff --git a/clients/client-marketplace-commerce-analytics/src/commands/StartSupportDataExportCommand.ts b/clients/client-marketplace-commerce-analytics/src/commands/StartSupportDataExportCommand.ts
index af0cf73433409..39896f32ecb1d 100644
--- a/clients/client-marketplace-commerce-analytics/src/commands/StartSupportDataExportCommand.ts
+++ b/clients/client-marketplace-commerce-analytics/src/commands/StartSupportDataExportCommand.ts
@@ -55,7 +55,7 @@ export interface StartSupportDataExportCommandOutput extends StartSupportDataExp
* // const { MarketplaceCommerceAnalyticsClient, StartSupportDataExportCommand } = require("@aws-sdk/client-marketplace-commerce-analytics"); // CommonJS import
* const client = new MarketplaceCommerceAnalyticsClient(config);
* const input = { // StartSupportDataExportRequest
- * dataSetType: "STRING_VALUE", // required
+ * dataSetType: "customer_support_contacts_data" || "test_customer_support_contacts_data", // required
* fromDate: new Date("TIMESTAMP"), // required
* roleNameArn: "STRING_VALUE", // required
* destinationS3BucketName: "STRING_VALUE", // required
diff --git a/clients/client-marketplace-commerce-analytics/src/endpoint/ruleset.ts b/clients/client-marketplace-commerce-analytics/src/endpoint/ruleset.ts
index dc2a0847ae493..e2dbc8a8a0db1 100644
--- a/clients/client-marketplace-commerce-analytics/src/endpoint/ruleset.ts
+++ b/clients/client-marketplace-commerce-analytics/src/endpoint/ruleset.ts
@@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/marketplace-commerce-analytics.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://marketplacecommerceanalytics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://marketplacecommerceanalytics-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://marketplacecommerceanalytics.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://marketplacecommerceanalytics.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://marketplacecommerceanalytics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://marketplacecommerceanalytics-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://marketplacecommerceanalytics.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://marketplacecommerceanalytics.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-marketplace-commerce-analytics/src/models/models_0.ts b/clients/client-marketplace-commerce-analytics/src/models/models_0.ts
index 8783655f082f8..e685920d92080 100644
--- a/clients/client-marketplace-commerce-analytics/src/models/models_0.ts
+++ b/clients/client-marketplace-commerce-analytics/src/models/models_0.ts
@@ -237,10 +237,19 @@ export class MarketplaceCommerceAnalyticsException extends __BaseException {
}
}
+/**
+ * @public
+ * @enum
+ */
+export const SupportDataSetType = {
+ customer_support_contacts_data: "customer_support_contacts_data",
+ test_customer_support_contacts_data: "test_customer_support_contacts_data",
+} as const;
+
/**
* @public
*/
-export type SupportDataSetType = "customer_support_contacts_data" | "test_customer_support_contacts_data";
+export type SupportDataSetType = (typeof SupportDataSetType)[keyof typeof SupportDataSetType];
/**
* @public
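Both the named constant and the raw string literal satisfy the derived union, assuming the model is re-exported from the package root as v3 clients generally do:

```ts
import { SupportDataSetType } from "@aws-sdk/client-marketplace-commerce-analytics";

const fromConstant: SupportDataSetType = SupportDataSetType.customer_support_contacts_data;
const fromLiteral: SupportDataSetType = "test_customer_support_contacts_data";
console.log(fromConstant, fromLiteral);
```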
diff --git a/clients/client-marketplace-entitlement-service/src/endpoint/ruleset.ts b/clients/client-marketplace-entitlement-service/src/endpoint/ruleset.ts
index 8fb769a524b26..a6884e5cc52f3 100644
--- a/clients/client-marketplace-entitlement-service/src/endpoint/ruleset.ts
+++ b/clients/client-marketplace-entitlement-service/src/endpoint/ruleset.ts
@@ -6,24 +6,27 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/marketplace-entitlement-service.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const s="required",
+t="fn",
+u="argv",
+v="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://entitlement.marketplace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://entitlement.marketplace-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://entitlement.marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://entitlement.marketplace.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f="getAttr",
+g={[s]:false,"type":"String"},
+h={[s]:true,"default":false,"type":"Boolean"},
+i={[v]:"Endpoint"},
+j={[t]:"booleanEquals",[u]:[{[v]:"UseFIPS"},true]},
+k={[t]:"booleanEquals",[u]:[{[v]:"UseDualStack"},true]},
+l={},
+m={[t]:"booleanEquals",[u]:[true,{[t]:f,[u]:[{[v]:e},"supportsFIPS"]}]},
+n={[v]:e},
+o={[t]:"booleanEquals",[u]:[true,{[t]:f,[u]:[n,"supportsDualStack"]}]},
+p=[j],
+q=[k],
+r=[{[v]:"Region"}];
+const _data={version:"1.0",parameters:{Region:g,UseDualStack:h,UseFIPS:h,Endpoint:g},rules:[{conditions:[{[t]:a,[u]:[i]}],type:b,rules:[{conditions:p,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:q,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:i,properties:l,headers:l},type:d}]},{conditions:[{[t]:a,[u]:r}],type:b,rules:[{conditions:[{[t]:"aws.partition",[u]:r,assign:e}],type:b,rules:[{conditions:[j,k],type:b,rules:[{conditions:[m,o],type:b,rules:[{endpoint:{url:"https://entitlement.marketplace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:p,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://entitlement.marketplace-fips.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:q,type:b,rules:[{conditions:[o],type:b,rules:[{endpoint:{url:"https://entitlement.marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{conditions:[{[t]:"stringEquals",[u]:["aws",{[t]:f,[u]:[n,"name"]}]}],endpoint:{url:"https://entitlement.marketplace.{Region}.amazonaws.com",properties:l,headers:l},type:d},{endpoint:{url:"https://entitlement.marketplace.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-marketplace-metering/src/MarketplaceMetering.ts b/clients/client-marketplace-metering/src/MarketplaceMetering.ts
index 6d91fea9b4eaa..c10d2839857d3 100644
--- a/clients/client-marketplace-metering/src/MarketplaceMetering.ts
+++ b/clients/client-marketplace-metering/src/MarketplaceMetering.ts
@@ -82,37 +82,37 @@ export interface MarketplaceMetering {
/**
* @public
* This reference provides descriptions of the low-level AWS Marketplace Metering Service
+ * This reference provides descriptions of the low-level AWS Marketplace Metering Service
* API. AWS Marketplace sellers can use this API to submit usage data for custom usage
+ * AWS Marketplace sellers can use this API to submit usage data for custom usage
* dimensions. For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the
+ * For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the
* AWS Marketplace Seller Guide.
- *
+ *
* Submitting Metering Records
- *
- * MeterUsage - Submits the metering record for an AWS
+ *
+ * MeterUsage - Submits the metering record for an AWS
* Marketplace product.
- * BatchMeterUsage - Submits the metering record for a set of
+ *
+ * BatchMeterUsage - Submits the metering record for a set of
* customers.
+ *
* Accepting New Customers
- *
- * ResolveCustomer - Called by a SaaS application during the
+ *
+ * ResolveCustomer - Called by a SaaS application during the
* registration process. When a buyer visits your website during the registration
* process, the buyer submits a Registration Token through the browser. The
* Registration Token is resolved through this API to obtain a
@@ -122,12 +122,12 @@ export interface MarketplaceMetering {
*
+ *
* Entitlement and Metering for Paid Container Products
- * Paid container software products sold through AWS Marketplace must integrate
+ * Paid container software products sold through AWS Marketplace must integrate
* with the AWS Marketplace Metering Service and call the
*
+ *
* This reference provides descriptions of the low-level AWS Marketplace Metering Service
+ * This reference provides descriptions of the low-level AWS Marketplace Metering Service
* API. AWS Marketplace sellers can use this API to submit usage data for custom usage
+ * AWS Marketplace sellers can use this API to submit usage data for custom usage
* dimensions. For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the
+ * For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the
* AWS Marketplace Seller Guide.
- *
+ *
* Submitting Metering Records
- *
- * MeterUsage - Submits the metering record for an AWS
+ *
+ * MeterUsage - Submits the metering record for an AWS
* Marketplace product.
- * BatchMeterUsage - Submits the metering record for a set of
+ *
+ * BatchMeterUsage - Submits the metering record for a set of
* customers.
+ *
* Accepting New Customers
- *
- * ResolveCustomer - Called by a SaaS application during the
+ *
+ * ResolveCustomer - Called by a SaaS application during the
* registration process. When a buyer visits your website during the registration
* process, the buyer submits a Registration Token through the browser. The
* Registration Token is resolved through this API to obtain a
@@ -294,12 +294,12 @@ export interface MarketplaceMeteringClientResolvedConfig extends MarketplaceMete
*
+ *
* Entitlement and Metering for Paid Container Products
- * ClientToken
, the AWS SDK you are using inserts a value for you.
 * This prevents retries after a network error from making multiple start requests. You'll need to
 * provide your own value for other use cases.
 * An error occurs if the other input parameters are not the same as in the first request. Using a different
 * value for ClientToken is considered a new call to StartModel. An idempotency
 * token is active for 8 hours.
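A sketch of the idempotency contract described above, using StartModel; the project name is hypothetical:

```ts
import { LookoutVisionClient, StartModelCommand } from "@aws-sdk/client-lookoutvision";
import { randomUUID } from "node:crypto";

const client = new LookoutVisionClient({ region: "us-east-1" });

// Reusing the same ClientToken makes retries of this call idempotent for 8 hours;
// reusing it with different parameters causes an error.
const clientToken = randomUUID();

await client.send(
  new StartModelCommand({
    ProjectName: "my-project", // hypothetical
    ModelVersion: "1",
    MinInferenceUnits: 1,
    ClientToken: clientToken,
  })
);
```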
diff --git a/clients/client-m2/src/endpoint/ruleset.ts b/clients/client-m2/src/endpoint/ruleset.ts
index 7518c967e1579..38ff32bceceb8 100644
--- a/clients/client-m2/src/endpoint/ruleset.ts
+++ b/clients/client-m2/src/endpoint/ruleset.ts
@@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"
n=[i],
o=[j],
p=[{[t]:"Region"}];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://m2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://m2-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://m2.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://m2.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]};
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://m2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://m2-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://m2.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://m2.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-machine-learning/src/commands/AddTagsCommand.ts b/clients/client-machine-learning/src/commands/AddTagsCommand.ts
index 6dc93d402cc60..9b0fab4757762 100644
--- a/clients/client-machine-learning/src/commands/AddTagsCommand.ts
+++ b/clients/client-machine-learning/src/commands/AddTagsCommand.ts
@@ -53,13 +53,13 @@ export interface AddTagsCommandOutput extends AddTagsOutput, __MetadataBearer {}
* },
* ],
* ResourceId: "STRING_VALUE", // required
- * ResourceType: "STRING_VALUE", // required
+ * ResourceType: "BatchPrediction" || "DataSource" || "Evaluation" || "MLModel", // required
* };
* const command = new AddTagsCommand(input);
* const response = await client.send(command);
* // { // AddTagsOutput
* // ResourceId: "STRING_VALUE",
- * // ResourceType: "STRING_VALUE",
+ * // ResourceType: "BatchPrediction" || "DataSource" || "Evaluation" || "MLModel",
* // };
*
* ```
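A sketch of the narrowed ResourceType union in use; the resource ID is hypothetical:

```ts
import { MachineLearningClient, AddTagsCommand } from "@aws-sdk/client-machine-learning";

const client = new MachineLearningClient({ region: "us-east-1" });

// ResourceType is "BatchPrediction" | "DataSource" | "Evaluation" | "MLModel";
// anything else is now a compile-time error.
await client.send(
  new AddTagsCommand({
    Tags: [{ Key: "team", Value: "data-science" }],
    ResourceId: "ml-example-model-id", // hypothetical
    ResourceType: "MLModel",
  })
);
```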
diff --git a/clients/client-machine-learning/src/commands/CreateMLModelCommand.ts b/clients/client-machine-learning/src/commands/CreateMLModelCommand.ts
index f33199b070494..7498bfdc00547 100644
--- a/clients/client-machine-learning/src/commands/CreateMLModelCommand.ts
+++ b/clients/client-machine-learning/src/commands/CreateMLModelCommand.ts
@@ -65,7 +65,7 @@ export interface CreateMLModelCommandOutput extends CreateMLModelOutput, __Metad
* const input = { // CreateMLModelInput
* MLModelId: "STRING_VALUE", // required
* MLModelName: "STRING_VALUE",
- * MLModelType: "STRING_VALUE", // required
+ * MLModelType: "REGRESSION" || "BINARY" || "MULTICLASS", // required
* Parameters: { // TrainingParameters
* "
+ *
*
- * MeterUsage
is called from an EC2 instance or a
* container running on EKS or ECS.BatchMeterUsage
is called from a software-as-a-service
* (SaaS) application.
+ *
*
- * ProductCode
.
+ *
*
- * RegisterUsage
operation for software entitlement and metering.
* Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call
@@ -136,7 +136,7 @@ export interface MarketplaceMetering {
* RegisterUsage
operation, see Container-Based Products. BatchMeterUsage
API calls are captured by AWS CloudTrail. You can use
 * CloudTrail to verify that the SaaS metering records that you sent are accurate by
* searching for records with the eventName
of BatchMeterUsage
.
diff --git a/clients/client-marketplace-metering/src/MarketplaceMeteringClient.ts b/clients/client-marketplace-metering/src/MarketplaceMeteringClient.ts
index d9b7739cdea35..91fdfa0970d1e 100644
--- a/clients/client-marketplace-metering/src/MarketplaceMeteringClient.ts
+++ b/clients/client-marketplace-metering/src/MarketplaceMeteringClient.ts
@@ -254,37 +254,37 @@ export interface MarketplaceMeteringClientResolvedConfig extends MarketplaceMete
/**
* @public
*
+ *
*
- * MeterUsage
is called from an EC2 instance or a
* container running on EKS or ECS.BatchMeterUsage
is called from a software-as-a-service
* (SaaS) application.
+ *
*
- * ProductCode
.
+ *
Paid container software products sold through AWS Marketplace must integrate + *
Paid container software products sold through AWS Marketplace must integrate
* with the AWS Marketplace Metering Service and call the
* RegisterUsage
operation for software entitlement and metering.
* Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call
@@ -308,7 +308,7 @@ export interface MarketplaceMeteringClientResolvedConfig extends MarketplaceMete
* RegisterUsage
operation, see Container-Based Products.
+ *
* BatchMeterUsage
API calls are captured by AWS CloudTrail. You can use
 * CloudTrail to verify that the SaaS metering records that you sent are accurate by
* searching for records with the eventName
of BatchMeterUsage
.
diff --git a/clients/client-marketplace-metering/src/commands/BatchMeterUsageCommand.ts b/clients/client-marketplace-metering/src/commands/BatchMeterUsageCommand.ts
index 74ab6ddf04015..7648916abe5b9 100644
--- a/clients/client-marketplace-metering/src/commands/BatchMeterUsageCommand.ts
+++ b/clients/client-marketplace-metering/src/commands/BatchMeterUsageCommand.ts
@@ -43,30 +43,30 @@ export interface BatchMeterUsageCommandOutput extends BatchMeterUsageResult, __M
*
* BatchMeterUsage
is called from a SaaS application listed on AWS
* Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with the same + *
For identical requests, the API is idempotent; requests can be retried with the same * records or a subset of the input records.
- *Every request to BatchMeterUsage
is for one product. If you need to meter
+ *
Every request to BatchMeterUsage
is for one product. If you need to meter
* usage for multiple products, you must make multiple calls to
* BatchMeterUsage
.
Usage records are expected to be submitted as quickly as possible after the event that + *
Usage records are expected to be submitted as quickly as possible after the event that * is being recorded, and are not accepted more than 6 hours after the event.
- *+ *
* BatchMeterUsage
can process up to 25 UsageRecords
at a
* time.
A UsageRecord
can optionally include multiple usage allocations, to
+ *
A UsageRecord
can optionally include multiple usage allocations, to
* provide customers with usage data split into buckets by tags that you define (or allow
* the customer to define).
+ *
* BatchMeterUsage
returns a list of UsageRecordResult
objects,
* showing the result for each UsageRecord
, as well as a list of
* UnprocessedRecords
, indicating errors in the service side that you
* should retry.
+ *
* BatchMeterUsage
requests must be less than 1MB in size.
For an example of using BatchMeterUsage
, see BatchMeterUsage code example in the AWS Marketplace Seller
* Guide.
The timestamp
value passed in the UsageRecord
is out of
* allowed range.
For BatchMeterUsage
, if any of the records are outside of the allowed
+ *
For BatchMeterUsage
, if any of the records are outside of the allowed
* range, the entire batch is not processed. You must remove invalid records and try
* again.
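A sketch of a single-record batch per the constraints above (one product per request, at most 25 records, timestamps within the allowed window); the product code and customer identifier are hypothetical:

```ts
import { MarketplaceMeteringClient, BatchMeterUsageCommand } from "@aws-sdk/client-marketplace-metering";

const client = new MarketplaceMeteringClient({ region: "us-east-1" });

const { Results, UnprocessedRecords } = await client.send(
  new BatchMeterUsageCommand({
    ProductCode: "examplecode123", // hypothetical
    UsageRecords: [
      {
        Timestamp: new Date(), // not more than 6 hours after the event
        CustomerIdentifier: "customer-abc", // hypothetical, from ResolveCustomer
        Dimension: "users",
        Quantity: 3,
      },
    ],
  })
);
console.log(Results?.length, UnprocessedRecords?.length); // retry anything unprocessed
```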
API to emit metering records. For identical requests, the API is idempotent. It simply * returns the metering record ID.
- *+ *
* MeterUsage
is authenticated on the buyer's AWS account using credentials
* from the EC2 instance, ECS task, or EKS pod.
+ *
* MeterUsage
can optionally include multiple usage allocations, to provide
* customers with usage data split into buckets by tags that you define (or allow the
* customer to define).
Usage records are expected to be submitted as quickly as possible after the event that + *
Usage records are expected to be submitted as quickly as possible after the event that * is being recorded, and are not accepted more than 6 hours after the event.
* @example * Use a bare-bones client and the command you need to make an API call. @@ -128,7 +128,7 @@ export interface MeterUsageCommandOutput extends MeterUsageResult, __MetadataBea * @throws {@link TimestampOutOfBoundsException} (client fault) *The timestamp
value passed in the UsageRecord
is out of
* allowed range.
For BatchMeterUsage
, if any of the records are outside of the allowed
+ *
For BatchMeterUsage
, if any of the records are outside of the allowed
* range, the entire batch is not processed. You must remove invalid records and try
* again.
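A sketch of MeterUsage; because it authenticates as the buyer via the instance, task, or pod credentials, no customer identifier is passed (the product code is hypothetical):

```ts
import { MarketplaceMeteringClient, MeterUsageCommand } from "@aws-sdk/client-marketplace-metering";

const client = new MarketplaceMeteringClient({ region: "us-east-1" });

const { MeteringRecordId } = await client.send(
  new MeterUsageCommand({
    ProductCode: "examplecode123", // hypothetical
    Timestamp: new Date(), // within the 6-hour acceptance window
    UsageDimension: "users",
    UsageQuantity: 1,
    DryRun: false, // set true to validate without recording usage
  })
);
console.log(MeteringRecordId); // idempotent: identical requests return the same record ID
```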
RegisterUsage
. RegisterUsage
performs two primary
* functions: metering and entitlement.
- *
- * Entitlement: RegisterUsage
allows you to
+ *
+ * Entitlement: RegisterUsage
allows you to
* verify that the customer running your paid software is subscribed to your
* product on AWS Marketplace, enabling you to guard against unauthorized use. Your
* container image that integrates with RegisterUsage
is only required
@@ -64,8 +64,8 @@ export interface RegisterUsageCommandOutput extends RegisterUsageResult, __Metad
* running.
- * Metering: RegisterUsage
meters software use
+ *
+ * Metering: RegisterUsage
meters software use
* per ECS task, per hour, or per pod for Amazon EKS with usage prorated to the
* second. A minimum of 1 minute of usage applies to tasks that are short lived.
* For example, if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a
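A sketch of the entitlement check a paid container product makes at startup; the product code and nonce are hypothetical:

```ts
import { MarketplaceMeteringClient, RegisterUsageCommand } from "@aws-sdk/client-marketplace-metering";

const client = new MarketplaceMeteringClient({ region: "us-east-1" });

// Called once per ECS task or EKS pod; metering is then handled per hour, prorated to the second.
const { Signature } = await client.send(
  new RegisterUsageCommand({
    ProductCode: "examplecode123", // hypothetical
    PublicKeyVersion: 1,
    Nonce: "hypothetical-nonce-value",
  })
);
console.log(Signature); // signature your software can verify against the public key
```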
diff --git a/clients/client-marketplace-metering/src/commands/ResolveCustomerCommand.ts b/clients/client-marketplace-metering/src/commands/ResolveCustomerCommand.ts
index 5e41a3397c706..3f455282745b2 100644
--- a/clients/client-marketplace-metering/src/commands/ResolveCustomerCommand.ts
+++ b/clients/client-marketplace-metering/src/commands/ResolveCustomerCommand.ts
@@ -48,12 +48,12 @@ export interface ResolveCustomerCommandOutput extends ResolveCustomerResult, __M
* along with the
* CustomerAWSAccountId
and
* ProductCode
.
 * The API needs to be called from the seller account ID used to publish the SaaS
 * application to successfully resolve the token.
 * For an example of using ResolveCustomer, see ResolveCustomer code example in the AWS Marketplace Seller
 * Guide.
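A sketch of resolving the registration token during SaaS registration; how the token reaches your server is outside this API, so the variable here is a stand-in:

```ts
import { MarketplaceMeteringClient, ResolveCustomerCommand } from "@aws-sdk/client-marketplace-metering";

const client = new MarketplaceMeteringClient({ region: "us-east-1" });

const registrationToken = process.env.REGISTRATION_TOKEN ?? ""; // hypothetical: captured from the buyer's browser POST

// Must be called from the seller account that published the listing.
const { CustomerIdentifier, ProductCode, CustomerAWSAccountId } = await client.send(
  new ResolveCustomerCommand({ RegistrationToken: registrationToken })
);
console.log(CustomerIdentifier, ProductCode, CustomerAWSAccountId);
```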
This reference provides descriptions of the low-level AWS Marketplace Metering Service + *
This reference provides descriptions of the low-level AWS Marketplace Metering Service * API.
- *AWS Marketplace sellers can use this API to submit usage data for custom usage + *
AWS Marketplace sellers can use this API to submit usage data for custom usage * dimensions.
- *For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the + *
For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the * AWS Marketplace Seller Guide. - *
- *+ *
+ ** Submitting Metering Records - *
- *- * MeterUsage - Submits the metering record for an AWS + *
+ * MeterUsage - Submits the metering record for an AWS
* Marketplace product. MeterUsage
is called from an EC2 instance or a
* container running on EKS or ECS.
- * BatchMeterUsage - Submits the metering record for a set of + *
+ * BatchMeterUsage - Submits the metering record for a set of
* customers. BatchMeterUsage
is called from a software-as-a-service
* (SaaS) application.
+ *
* Accepting New Customers - *
- *- * ResolveCustomer - Called by a SaaS application during the + *
+ * ResolveCustomer - Called by a SaaS application during the
* registration process. When a buyer visits your website during the registration
* process, the buyer submits a Registration Token through the browser. The
* Registration Token is resolved through this API to obtain a
@@ -42,12 +42,12 @@
* ProductCode
.
+ *
* Entitlement and Metering for Paid Container Products - *
- *Paid container software products sold through AWS Marketplace must integrate + *
Paid container software products sold through AWS Marketplace must integrate
* with the AWS Marketplace Metering Service and call the
* RegisterUsage
operation for software entitlement and metering.
* Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call
@@ -56,7 +56,7 @@
* RegisterUsage
operation, see Container-Based Products.
+ *
* BatchMeterUsage
API calls are captured by AWS CloudTrail. You can use
 * CloudTrail to verify that the SaaS metering records that you sent are accurate by
* searching for records with the eventName
of BatchMeterUsage
.
diff --git a/clients/client-marketplace-metering/src/models/models_0.ts b/clients/client-marketplace-metering/src/models/models_0.ts
index 031130400140c..2dc94e3858358 100644
--- a/clients/client-marketplace-metering/src/models/models_0.ts
+++ b/clients/client-marketplace-metering/src/models/models_0.ts
@@ -27,7 +27,7 @@ export interface Tag {
/**
* @public
*
Usage allocations allow you to split usage into buckets by tags.
- *Each UsageAllocation
indicates the usage quantity for a specific set of
+ *
Each UsageAllocation
indicates the usage quantity for a specific set of
* tags.
A UsageRecord
indicates a quantity of usage for a given product,
* customer, dimension and time.
Multiple requests with the same UsageRecords
as input will be
+ *
Multiple requests with the same UsageRecords
as input will be
* de-duplicated to prevent double charges.
Timestamp, in UTC, for which the usage is being reported.
- *Your application can meter usage for up to one hour in the past. Make sure the + *
Your application can meter usage for up to one hour in the past. Make sure the
* timestamp
value is not before the start of the software usage.
The UsageRecordResult
* Status
indicates the status of an individual UsageRecord
* processed by BatchMeterUsage
.
- * Success- The UsageRecord
was accepted and
+ *
+ * Success- The UsageRecord
was accepted and
* honored by BatchMeterUsage
.
- * CustomerNotSubscribed- The CustomerIdentifier
+ *
+ * CustomerNotSubscribed- The CustomerIdentifier
* specified is not able to use your product. The UsageRecord
was not
* honored. There are three causes for this result:
The customer identifier is invalid.
- *The customer identifier is invalid.
+ *The customer identifier provided in the metering record does not have + *
The customer identifier provided in the metering record does not have
* an active agreement or subscription with this product. Future
* UsageRecords
for this customer will fail until the
* customer subscribes to your product.
The customer's AWS account was suspended.
- *The customer's AWS account was suspended.
+ *- * DuplicateRecord- Indicates that the + *
+ * DuplicateRecord- Indicates that the
* UsageRecord
was invalid and not honored. A previously metered
* UsageRecord
had the same customer, dimension, and time, but a
* different quantity.
The timestamp
value passed in the UsageRecord
is out of
* allowed range.
For BatchMeterUsage
, if any of the records are outside of the allowed
+ *
For BatchMeterUsage
, if any of the records are outside of the allowed
* range, the entire batch is not processed. You must remove invalid records and try
* again.
The set of UsageAllocations
to submit.
The sum of all UsageAllocation
quantities must equal the
+ *
The sum of all UsageAllocation
quantities must equal the
* UsageQuantity
of the MeterUsage
request, and each
* UsageAllocation
must have a unique set of tags (include no
* tags).
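A sketch of allocations that satisfy the constraint above (3 + 1 equals the UsageQuantity of 4, and each allocation carries a distinct tag set); the product code is hypothetical:

```ts
import { MarketplaceMeteringClient, MeterUsageCommand } from "@aws-sdk/client-marketplace-metering";

const client = new MarketplaceMeteringClient({ region: "us-east-1" });

await client.send(
  new MeterUsageCommand({
    ProductCode: "examplecode123", // hypothetical
    Timestamp: new Date(),
    UsageDimension: "users",
    UsageQuantity: 4,
    UsageAllocations: [
      { AllocatedUsageQuantity: 3, Tags: [{ Key: "department", Value: "analytics" }] },
      { AllocatedUsageQuantity: 1, Tags: [{ Key: "department", Value: "ops" }] },
    ],
  })
);
```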
itemId
) and rating (eventValue
)
* , you might also send the number of movie ratings made by the user.
* Each item in the map consists of a key-value pair. For example,
- *
 *
* \{"numberOfRatings": "12"\}
*
This is the Recycle Bin API Reference. This documentation provides descriptions and syntax for each of the actions and data types in Recycle Bin.
-Recycle Bin is a resource recovery feature that enables you to restore accidentally deleted snapshots and EBS-backed AMIs. When using Recycle Bin, if your resources are deleted, they are retained in the Recycle Bin for a time period that you specify.
-You can restore a resource from the Recycle Bin at any time before its retention period expires. After you restore a resource from the Recycle Bin, the resource is removed from the Recycle Bin, and you can then use it in the same way you use any other resource of that type
diff --git a/clients/client-rbin/src/Rbin.ts b/clients/client-rbin/src/Rbin.ts
index f9709ab1efefe..c5edd24df7c39 100644
--- a/clients/client-rbin/src/Rbin.ts
+++ b/clients/client-rbin/src/Rbin.ts
@@ -157,11 +157,9 @@ export interface Rbin {
 * @public
 *
This is the Recycle Bin API Reference. This documentation provides * descriptions and syntax for each of the actions and data types in Recycle Bin.
- *
 *Recycle Bin is a resource recovery feature that enables you to restore accidentally * deleted snapshots and EBS-backed AMIs. When using Recycle Bin, if your resources are * deleted, they are retained in the Recycle Bin for a time period that you specify.
- *
 *You can restore a resource from the Recycle Bin at any time before its retention period * expires. After you restore a resource from the Recycle Bin, the resource is removed from the * Recycle Bin, and you can then use it in the same way you use any other resource of that type
diff --git a/clients/client-rbin/src/RbinClient.ts b/clients/client-rbin/src/RbinClient.ts
index 675745610a958..1f5e1d4dda593 100644
--- a/clients/client-rbin/src/RbinClient.ts
+++ b/clients/client-rbin/src/RbinClient.ts
@@ -276,11 +276,9 @@ export interface RbinClientResolvedConfig extends RbinClientResolvedConfigType {
 * @public
 *
This is the Recycle Bin API Reference. This documentation provides * descriptions and syntax for each of the actions and data types in Recycle Bin.
- *
 *Recycle Bin is a resource recovery feature that enables you to restore accidentally * deleted snapshots and EBS-backed AMIs. When using Recycle Bin, if your resources are * deleted, they are retained in the Recycle Bin for a time period that you specify.
- *
 *You can restore a resource from the Recycle Bin at any time before its retention period
* expires. After you restore a resource from the Recycle Bin, the resource is removed from the
* Recycle Bin, and you can then use it in the same way you use any other resource of that type
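A rough sketch of the retention behavior described above, using the client this diff touches (rule values are hypothetical; CreateRuleCommand is the configuration-side entry point):

import { RbinClient, CreateRuleCommand } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" });

// Retain deleted EBS snapshots for 7 days before they are permanently deleted.
await client.send(
  new CreateRuleCommand({
    ResourceType: "EBS_SNAPSHOT",
    RetentionPeriod: { RetentionPeriodValue: 7, RetentionPeriodUnit: "DAYS" },
    Description: "example retention rule", // hypothetical
  })
);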
diff --git a/clients/client-rbin/src/endpoint/EndpointParameters.ts b/clients/client-rbin/src/endpoint/EndpointParameters.ts
index 2995d7fa16dfd..b94370760527a 100644
--- a/clients/client-rbin/src/endpoint/EndpointParameters.ts
+++ b/clients/client-rbin/src/endpoint/EndpointParameters.ts
@@ -27,7 +27,7 @@ export const resolveClientEndpointParameters = This is the Recycle Bin API Reference. This documentation provides
* descriptions and syntax for each of the actions and data types in Recycle Bin. Recycle Bin is a resource recovery feature that enables you to restore accidentally
* deleted snapshots and EBS-backed AMIs. When using Recycle Bin, if your resources are
* deleted, they are retained in the Recycle Bin for a time period that you specify. You can restore a resource from the Recycle Bin at any time before its retention period
* expires. After you restore a resource from the Recycle Bin, the resource is removed from the
* Recycle Bin, and you can then use it in the same way you use any other resource of that type
diff --git a/clients/client-rds-data/src/endpoint/ruleset.ts b/clients/client-rds-data/src/endpoint/ruleset.ts
index 5b5e1089f14c1..e6c8805fd5680 100644
--- a/clients/client-rds-data/src/endpoint/ruleset.ts
+++ b/clients/client-rds-data/src/endpoint/ruleset.ts
@@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/rds-data.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://rds-data-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{endpoint:{url:"https://rds-data-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://rds-data.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://rds-data.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://rds-data-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://rds-data-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://rds-data.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://rds-data.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
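The rewritten ruleset above checks a caller-supplied Endpoint before resolving the partition, and fails fast with "Invalid Configuration: Missing Region" when neither branch applies. From application code this is only visible through ordinary client configuration; a sketch with a hypothetical endpoint URL:

import { RDSDataClient } from "@aws-sdk/client-rds-data";

// A custom endpoint short-circuits Region-based resolution (FIPS and
// DualStack must stay off); omitting both endpoint and region now errors.
const client = new RDSDataClient({
  region: "us-east-1",
  endpoint: "https://rds-data.example.internal", // hypothetical
});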
diff --git a/clients/client-redshift-data/src/endpoint/ruleset.ts b/clients/client-redshift-data/src/endpoint/ruleset.ts
index 4698f313a5fc6..a453c982c65ee 100644
--- a/clients/client-redshift-data/src/endpoint/ruleset.ts
+++ b/clients/client-redshift-data/src/endpoint/ruleset.ts
@@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"
n=[i],
o=[j],
p=[{[t]:"Region"}];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://redshift-data-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://redshift-data-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://redshift-data.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://redshift-data.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]};
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://redshift-data-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://redshift-data-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://redshift-data.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://redshift-data.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-redshift-serverless/src/endpoint/EndpointParameters.ts b/clients/client-redshift-serverless/src/endpoint/EndpointParameters.ts
index a5d4dd0207ab7..0ec3bd7545414 100644
--- a/clients/client-redshift-serverless/src/endpoint/EndpointParameters.ts
+++ b/clients/client-redshift-serverless/src/endpoint/EndpointParameters.ts
@@ -27,7 +27,7 @@ export const resolveClientEndpointParameters =
Describes the status of the
You can call this operation only from the organization's
+ * You can call this operation only from the organization's
* management account and from the us-east-1 Region.
The request was denied because performing this operation violates a constraint.
Some of the reasons in the following list might not apply to this specific
+ * Some of the reasons in the following list might not apply to this specific
* operation.
You must meet the prerequisites for using tag policies. For information, see
+ * You must meet the prerequisites for using tag policies. For information, see
* Prerequisites and Permissions for Using Tag Policies in the
* Organizations User Guide.
* You must enable the tag policies service principal
+ * You must enable the tag policies service principal
* ( You must have a tag policy attached to the organization root, an OU, or an
+ * You must have a tag policy attached to the organization root, an OU, or an
* account.
This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
* Returns a table that shows counts of resources that are noncompliant with their tag
* policies.
For more information on tag policies, see Tag Policies in
+ * For more information on tag policies, see Tag Policies in
* the Organizations User Guide.
* You can call this operation only from the organization's
+ * You can call this operation only from the organization's
* management account and from the us-east-1 Region.
This operation supports pagination, where the response can be sent in
+ * This operation supports pagination, where the response can be sent in
* multiple pages. You should check the
The request was denied because performing this operation violates a constraint.
Some of the reasons in the following list might not apply to this specific
+ * Some of the reasons in the following list might not apply to this specific
* operation.
You must meet the prerequisites for using tag policies. For information, see
+ * You must meet the prerequisites for using tag policies. For information, see
* Prerequisites and Permissions for Using Tag Policies in the
* Organizations User Guide.
* You must enable the tag policies service principal
+ * You must enable the tag policies service principal
* ( You must have a tag policy attached to the organization root, an OU, or an
+ * You must have a tag policy attached to the organization root, an OU, or an
* account.
This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
* Returns all the tagged or previously tagged resources that are located in the
* specified Amazon Web Services Region for the account.
Depending on what information you want returned, you can also specify the
+ * Depending on what information you want returned, you can also specify the
* following:
+ *
* Filters that specify what tags and resource types you
* want returned. The response includes all tags that are associated with the
* requested resources.
Information about compliance with the account's effective tag policy. For more
+ * Information about compliance with the account's effective tag policy. For more
* information on tag policies, see Tag
* Policies in the Organizations User Guide.
* This operation supports pagination, where the response can be sent in
+ * This operation supports pagination, where the response can be sent in
* multiple pages. You should check the
This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
* Returns all tag keys currently in use in the specified Amazon Web Services Region for the calling
* account.
This operation supports pagination, where the response can be sent in
+ * This operation supports pagination, where the response can be sent in
* multiple pages. You should check the
This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
* Returns all tag values for the specified key that are used in the specified Amazon Web Services
* Region for the calling account.
This operation supports pagination, where the response can be sent in
+ * This operation supports pagination, where the response can be sent in
* multiple pages. You should check the
This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
* Generates a report that lists all tagged resources in the accounts across your
* organization and tells whether each resource is compliant with the effective tag policy.
* Compliance data is refreshed daily. The report is generated asynchronously.
The generated report is saved to the following location:
+ * The generated report is saved to the following location:
* You can call this operation only from the organization's
+ * You can call this operation only from the organization's
* management account and from the us-east-1 Region.
The request was denied because performing this operation violates a constraint.
Some of the reasons in the following list might not apply to this specific
+ * Some of the reasons in the following list might not apply to this specific
* operation. You must meet the prerequisites for using tag policies. For information, see
+ * You must meet the prerequisites for using tag policies. For information, see
* Prerequisites and Permissions for Using Tag Policies in the
* Organizations User Guide.
* You must enable the tag policies service principal
+ * You must enable the tag policies service principal
* ( You must have a tag policy attached to the organization root, an OU, or an
+ * You must have a tag policy attached to the organization root, an OU, or an
* account.
This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
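A minimal sketch of starting and polling the report described above (the bucket name reuses the documentation's example; both commands live in @aws-sdk/client-resource-groups-tagging-api):

import {
  ResourceGroupsTaggingAPIClient,
  StartReportCreationCommand,
  DescribeReportCreationCommand,
} from "@aws-sdk/client-resource-groups-tagging-api";

// Must run from the organization's management account in us-east-1.
const client = new ResourceGroupsTaggingAPIClient({ region: "us-east-1" });

await client.send(new StartReportCreationCommand({ S3Bucket: "awsexamplebucket" }));

// The report is generated asynchronously; poll until SUCCEEDED or FAILED.
const { Status } = await client.send(new DescribeReportCreationCommand({}));
console.log(Status); // RUNNING | SUCCEEDED | FAILED | NO REPORT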
Applies one or more tags to the specified resources. Note the following:
Not all resources can have tags. For a list of services with resources that
+ * Not all resources can have tags. For a list of services with resources that
* support tagging using this operation, see Services that support the
* Resource Groups Tagging API. If the resource doesn't yet support
* this operation, the resource's service might support tagging using its own API
@@ -51,41 +51,41 @@ export interface TagResourcesCommandOutput extends TagResourcesOutput, __Metadat
* service.
Each resource can have up to 50 tags. For other limits, see Tag Naming and Usage Conventions in the Amazon Web Services General
+ * Each resource can have up to 50 tags. For other limits, see Tag Naming and Usage Conventions in the Amazon Web Services General
* Reference.
* You can only tag resources that are located in the specified Amazon Web Services Region for
+ * You can only tag resources that are located in the specified Amazon Web Services Region for
* the Amazon Web Services account.
To add tags to a resource, you need the necessary permissions for the service
+ * To add tags to a resource, you need the necessary permissions for the service
* that the resource belongs to as well as permissions for adding tags. For more
* information, see the documentation for each service. Do not store personally identifiable information (PII) or other confidential or
* sensitive information in tags. We use tags to provide you with billing and
* administration services. Tags are not intended to be used for private or sensitive
* data.
+ *
* Minimum permissions
* In addition to the In addition to the
+ *
*
+ *
* This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
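A sketch of the dual-permission point above, tagging an EC2 instance (the ARN is hypothetical; the caller needs tag:TagResources plus ec2:CreateTags):

import {
  ResourceGroupsTaggingAPIClient,
  TagResourcesCommand,
} from "@aws-sdk/client-resource-groups-tagging-api";

const client = new ResourceGroupsTaggingAPIClient({ region: "us-east-1" });

const { FailedResourcesMap } = await client.send(
  new TagResourcesCommand({
    ResourceARNList: [
      "arn:aws:ec2:us-east-1:123456789012:instance/i-0abcd1234example", // hypothetical
    ],
    Tags: { environment: "production", team: "alpha" },
  })
);

// Per-resource failures (for example, a missing service-side permission) land here.
console.log(FailedResourcesMap);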
* To remove tags from a resource, you need the necessary permissions for the
+ * To remove tags from a resource, you need the necessary permissions for the
* service that the resource belongs to as well as permissions for removing tags.
* For more information, see the documentation for the service whose resource you
* want to untag.
You can only tag resources that are located in the specified Amazon Web Services Region for
+ * You can only tag resources that are located in the specified Amazon Web Services Region for
* the calling Amazon Web Services account.
+ *
* Minimum permissions
* In addition to the In addition to the
+ *
*
+ *
* This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
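The mirror-image removal call, under the same permission model (hypothetical ARN again; requires tag:UntagResources plus ec2:DeleteTags):

import {
  ResourceGroupsTaggingAPIClient,
  UntagResourcesCommand,
} from "@aws-sdk/client-resource-groups-tagging-api";

const client = new ResourceGroupsTaggingAPIClient({ region: "us-east-1" });

await client.send(
  new UntagResourcesCommand({
    ResourceARNList: [
      "arn:aws:ec2:us-east-1:123456789012:instance/i-0abcd1234example", // hypothetical
    ],
    TagKeys: ["environment", "team"],
  })
);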
The request was denied because performing this operation violates a constraint.
Some of the reasons in the following list might not apply to this specific
+ * Some of the reasons in the following list might not apply to this specific
* operation.
You must meet the prerequisites for using tag policies. For information, see
+ * You must meet the prerequisites for using tag policies. For information, see
* Prerequisites and Permissions for Using Tag Policies in the
* Organizations User Guide.
* You must enable the tag policies service principal
+ * You must enable the tag policies service principal
* ( You must have a tag policy attached to the organization root, an OU, or an
+ * You must have a tag policy attached to the organization root, an OU, or an
* account. Reports the status of the operation. The operation status can be one of the following: The operation status can be one of the following:
+ *
*
+ *
*
+ *
*
+ *
* This error indicates one of the following:
A parameter is missing.
A parameter is missing.
A malformed string was supplied for the request parameter.
A malformed string was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
An out-of-range value was supplied for the request parameter.
The target ID is invalid, unsupported, or doesn't exist.
The target ID is invalid, unsupported, or doesn't exist.
You can't access the Amazon S3 bucket for report storage. For more information, see
+ * You can't access the Amazon S3 bucket for report storage. For more information, see
* Additional Requirements for Organization-wide Tag Compliance
* Reports in the Organizations User Guide.
* StartReportCreation
operation.
+ *
*
@@ -95,21 +95,21 @@ export interface DescribeReportCreationCommandOutput extends DescribeReportCreat
*
* @throws {@link InvalidParameterException} (client fault)
* tagpolicies.tag.amazonaws.com
) to integrate with Organizations. For
* information, see EnableAWSServiceAccess.
+ *
*
PaginationToken
response parameter to determine
* if there are additional results available to return. Repeat the query, passing the
* PaginationToken
response parameter value as an input to the next request until you
@@ -73,7 +73,7 @@ export interface GetComplianceSummaryCommandOutput extends GetComplianceSummaryO
* "STRING_VALUE",
* ],
* GroupBy: [ // GroupBy
- * "STRING_VALUE",
+ * "TARGET_ID" || "REGION" || "RESOURCE_TYPE",
* ],
* MaxResults: Number("int"),
* PaginationToken: "STRING_VALUE",
@@ -85,7 +85,7 @@ export interface GetComplianceSummaryCommandOutput extends GetComplianceSummaryO
* // { // Summary
* // LastUpdated: "STRING_VALUE",
* // TargetId: "STRING_VALUE",
- * // TargetIdType: "STRING_VALUE",
+ * // TargetIdType: "ACCOUNT" || "OU" || "ROOT",
* // Region: "STRING_VALUE",
* // ResourceType: "STRING_VALUE",
* // NonCompliantResources: Number("long"),
@@ -104,22 +104,22 @@ export interface GetComplianceSummaryCommandOutput extends GetComplianceSummaryO
*
* @throws {@link ConstraintViolationException} (client fault)
*
+ *
*
@@ -130,21 +130,21 @@ export interface GetComplianceSummaryCommandOutput extends GetComplianceSummaryO
*
* @throws {@link InvalidParameterException} (client fault)
* tagpolicies.tag.amazonaws.com
) to integrate with Organizations. For
* information, see EnableAWSServiceAccess.
+ *
*
+ *
*
- * PaginationToken
response parameter to determine
* if there are additional results available to return. Repeat the query, passing the
* PaginationToken
response parameter value as an input to the next request until you
@@ -131,21 +131,21 @@ export interface GetResourcesCommandOutput extends GetResourcesOutput, __Metadat
*
* @throws {@link InvalidParameterException} (client fault)
*
+ *
*
PaginationToken
response parameter to determine
* if there are additional results available to return. Repeat the query, passing the
* PaginationToken
response parameter value as an input to the next request until you
@@ -80,21 +80,21 @@ export interface GetTagKeysCommandOutput extends GetTagKeysOutput, __MetadataBea
*
* @throws {@link InvalidParameterException} (client fault)
*
+ *
*
PaginationToken
response parameter to determine
* if there are additional results available to return. Repeat the query, passing the
* PaginationToken
response parameter value as an input to the next request until you
@@ -81,21 +81,21 @@ export interface GetTagValuesCommandOutput extends GetTagValuesOutput, __Metadat
*
* @throws {@link InvalidParameterException} (client fault)
*
+ *
*
s3://example-bucket/AwsTagPolicies/o-exampleorgid/YYYY-MM-ddTHH:mm:ssZ/report.csv
*
+ *
*
@@ -102,21 +102,21 @@ export interface StartReportCreationCommandOutput extends StartReportCreationOut
*
* @throws {@link InvalidParameterException} (client fault)
* tagpolicies.tag.amazonaws.com
) to integrate with Organizations. For
* information, see EnableAWSServiceAccess.
+ *
*
+ *
*
- * tag:TagResources
permission required by this
+ * tag:TagResources
permission required by this
* operation, you must also have the tagging permission defined by the service that created
* the resource. For example, to tag an Amazon EC2 instance using the TagResources
* operation, you must have both of the following permissions:
+ *
*
tag:TagResource
* ec2:CreateTags
*
+ *
*
+ *
*
- *
- * tag:UntagResources
permission required by this
+ * tag:UntagResources
permission required by this
* operation, you must also have the remove tags permission defined by the service that
* created the resource. For example, to remove the tags from an Amazon EC2 instance using the
* UntagResources
operation, you must have both of the following
* permissions:
+ *
*
tag:UntagResource
* ec2:DeleteTags
*
+ *
*
+ *
*
@@ -105,25 +105,25 @@ export interface DescribeReportCreationOutput {
/**
* @public
* tagpolicies.tag.amazonaws.com
) to integrate with Organizations. For
* information, see EnableAWSServiceAccess.
+ *
*
@@ -175,21 +175,21 @@ export class InternalServiceException extends __BaseException {
/**
* @public
* RUNNING
- Report creation is in progress.SUCCEEDED
- Report creation is complete. You can open the report
* from the Amazon S3 bucket that you specified when you ran
* StartReportCreation
.FAILED
- Report creation timed out or the Amazon S3 bucket is not
* accessible. NO REPORT
- No report was generated in the last 90 days.
+ *
*
InvalidParameterException
errors. It can also include any valid error
* code returned by the Amazon Web Services service that hosts the resource that the ARN key
* represents.
The following are common error codes that you might receive from other Amazon Web Services
+ *
The following are common error codes that you might receive from other Amazon Web Services * services:
- *
+ *
* InternalServiceException – This can * mean that the Resource Groups Tagging API didn't receive a response from another Amazon Web Services service. It * can also mean that the resource type in the request is not supported by the * Resource Groups Tagging API. In these cases, it's safe to retry the request and then call GetResources to verify the changes.
*
+ *
* AccessDeniedException – This can mean
* that you need permission to call the tagging operations in the Amazon Web Services service
* that contains the resource. For example, to use the Resource Groups Tagging API to tag a Amazon CloudWatch
* alarm resource, you need permission to call both
* TagResources
*
- * and
- *
+ * and
+ *
* TagResource
* in the CloudWatch API.
For more information on errors that are generated from other Amazon Web Services services, see the
+ *
For more information on errors that are generated from other Amazon Web Services services, see the * documentation for that service.
*/
export interface FailureInfo {
@@ -350,25 +350,25 @@ export interface GetComplianceSummaryInput {
 *
returns all Amazon EC2 resources (which includes EC2 instances).
* Specifying a resource type of ec2:instance
returns only EC2
* instances.
- * The string for each service name and resource type is the same as that embedded in a
+ *
The string for each service name and resource type is the same as that embedded in a * resource's Amazon Resource Name (ARN). Consult the * Amazon Web Services General Reference * * for the following:
- *For a list of service name strings, see Amazon Web Services Service Namespaces.
+ *For a list of service name strings, see Amazon Web Services Service Namespaces.
*For resource type strings, see Example
+ * For resource type strings, see Example
* ARNs.
For more information about ARNs, see Amazon Resource Names
+ * For more information about ARNs, see Amazon Resource Names
* (ARNs) and Amazon Web Services Service Namespaces.
You can specify multiple resource types by using a comma separated array. The array
+ *
You can specify multiple resource types by using a comma separated array. The array * can include up to 100 items. Note that the length constraint requirement applies to each * resource type filter.
*/
@@ -523,55 +523,55 @@ export interface GetResourcesInput {
 * resources that have tags with the specified keys and, if included, the specified values.
 * Each TagFilter
must contain a key with values optional. A request can
* include up to 50 keys, and each key can include up to 20 values.
- * Note the following when deciding how to use TagFilters:
- *Note the following when deciding how to use TagFilters:
+ *If you don't specify a TagFilter
, the
+ *
If you don't specify a TagFilter
, the
* response includes all resources that are currently tagged or ever had a tag.
* Resources that currently don't have tags are shown with an empty tag set, like
* this: "Tags": []
.
If you specify more than one filter in a single request, the response returns
+ *
If you specify more than one filter in a single request, the response returns * only those resources that satisfy all filters.
*If you specify a filter that contains more than one value for a key, the + *
If you specify a filter that contains more than one value for a key, the * response returns resources that match any of the specified * values for that key.
*If you don't specify a value for a key, the response returns all resources + *
If you don't specify a value for a key, the response returns all resources * that are tagged with that key, with any or no value.
- *For example, for the following filters: filter1= \{keyA,\{value1\}\}
,
+ *
For example, for the following filters: filter1= \{keyA,\{value1\}\}
,
* filter2=\{keyB,\{value2,value3,value4\}\}
, filter3=
* \{keyC\}
:
+ *
* GetResources(\{filter1\})
returns resources tagged with
* key1=value1
*
+ *
* GetResources(\{filter2\})
returns resources tagged with
* key2=value2
or key2=value3
or
* key2=value4
*
+ *
* GetResources(\{filter3\})
returns resources tagged with any
* tag with the key key3
, and with any or no value
+ *
* GetResources(\{filter1,filter2,filter3\})
returns resources
* tagged with (key1=value1) and (key2=value2 or key2=value3 or
* key2=value4) and (key3, any or no value)
*
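Pulling the filter semantics above together, a sketch of a paginated GetResources call (tag keys and values are the documentation's placeholders):

import {
  ResourceGroupsTaggingAPIClient,
  GetResourcesCommand,
} from "@aws-sdk/client-resource-groups-tagging-api";

const client = new ResourceGroupsTaggingAPIClient({ region: "us-east-1" });

// Values within one TagFilter are ORed; separate TagFilters are ANDed;
// a key with no Values matches any or no value.
let PaginationToken: string | undefined;
do {
  const page = await client.send(
    new GetResourcesCommand({
      TagFilters: [{ Key: "keyA", Values: ["value1"] }, { Key: "keyC" }],
      ResourcesPerPage: 50,
      PaginationToken,
    })
  );
  for (const mapping of page.ResourceTagMappingList ?? []) {
    console.log(mapping.ResourceARN, mapping.Tags);
  }
  PaginationToken = page.PaginationToken || undefined;
} while (PaginationToken);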
Amazon Web Services recommends using ResourcesPerPage
instead of this parameter.
A limit that restricts the number of tags (key and value pairs) returned by
+ *
A limit that restricts the number of tags (key and value pairs) returned by
* GetResources
in paginated output. A resource with no tags is counted as
* having one tag (one key and value pair).
+ *
* GetResources
does not split a resource and its associated tags across
* pages. If the specified TagsPerPage
would cause such a break, a
* PaginationToken
is returned in place of the affected resource and its
@@ -603,7 +603,7 @@ export interface GetResourcesInput {
* will consist of three pages. The first page displays the first 10 resources, each with
* its 10 tags. The second page displays the next 10 resources, each with its 10 tags. The
* third page displays the remaining 2 resources, each with its 10 tags.
You can set TagsPerPage
to a minimum of 100 items up to a maximum of 500
+ *
You can set TagsPerPage
to a minimum of 100 items up to a maximum of 500
* items.
ec2
returns all Amazon EC2 resources (which includes EC2
* instances). Specifying a resource type of ec2:instance
returns only EC2
* instances.
- * The string for each service name and resource type is the same as that embedded in a
+ *
The string for each service name and resource type is the same as that embedded in a * resource's Amazon Resource Name (ARN). For the list of services whose resources you can * use in this parameter, see Services that support the Resource Groups Tagging API.
- *You can specify multiple resource types by using an array. The array can include up to
+ *
You can specify multiple resource types by using an array. The array can include up to * 100 items. Note that the length constraint requirement applies to each resource type * filter. For example, the following string would limit the response to only Amazon EC2 * instances, Amazon S3 buckets, or any Audit Manager resource:
- *
+ *
* ec2:instance,s3:bucket,auditmanager
*
Specifies whether to exclude resources that are compliant with the tag policy. Set
* this to true
if you are interested in retrieving information on
* noncompliant resources only.
You can use this parameter only if the IncludeComplianceDetails
parameter
+ *
You can use this parameter only if the IncludeComplianceDetails
parameter
* is also set to true
.
ResourcesPerPage
, TagsPerPage
,
* PaginationToken
) in the same request. If you specify both, you get an
* Invalid Parameter
exception.
- * If a resource specified by this parameter doesn't exist, it doesn't generate an error;
+ *
If a resource specified by this parameter doesn't exist, it doesn't generate an error; * it simply isn't included in the response.
- *An ARN (Amazon Resource Name) uniquely identifies a resource. For more information,
+ *
An ARN (Amazon Resource Name) uniquely identifies a resource. For more information, * see Amazon * Resource Names (ARNs) and Amazon Web Services Service Namespaces in the * Amazon Web Services General Reference.
@@ -833,10 +833,10 @@ export interface StartReportCreationInput {
 /**
 * @public
 *The name of the Amazon S3 bucket where the report will be stored; for example:
- *
+ *
* awsexamplebucket
*
For more information on S3 bucket requirements, including an example bucket policy,
+ *
For more information on S3 bucket requirements, including an example bucket policy, * see the example S3 bucket policy on this page.
*/
 S3Bucket: string | undefined;
@@ -854,7 +854,7 @@ export interface TagResourcesInput {
 /**
 * @public
 *Specifies the list of ARNs of the resources that you want to apply tags to.
- *An ARN (Amazon Resource Name) uniquely identifies a resource. For more information,
+ *
An ARN (Amazon Resource Name) uniquely identifies a resource. For more information, * see Amazon * Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services * General Reference.
@@ -890,7 +890,7 @@ export interface UntagResourcesInput {
 /**
 * @public
 *Specifies a list of ARNs of the resources that you want to remove tags from.
- *An ARN (Amazon Resource Name) uniquely identifies a resource. For more information,
+ *
An ARN (Amazon Resource Name) uniquely identifies a resource. For more information, * see Amazon * Resource Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services * General Reference.
diff --git a/clients/client-resource-groups/src/endpoint/EndpointParameters.ts b/clients/client-resource-groups/src/endpoint/EndpointParameters.ts
index 3d085b2eb3213..b2dd56eabc587 100644
--- a/clients/client-resource-groups/src/endpoint/EndpointParameters.ts
+++ b/clients/client-resource-groups/src/endpoint/EndpointParameters.ts
@@ -27,7 +27,7 @@ export const resolveClientEndpointParameters =
Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller.
- *With Route 53 ARC, you can use routing control with extreme reliability to
+ *
With Route 53 ARC, you can use routing control with extreme reliability to * recover applications by rerouting traffic across * Availability Zones or Amazon Web Services Regions. Routing controls are simple on/off switches hosted * on a highly available cluster in Route 53 ARC. A cluster provides a set of five redundant Regional endpoints against which you * can run API calls to get or update the state of routing controls. To implement failover, you set * one routing control On and another one Off, to reroute traffic from one Availability Zone or Amazon Web Services Region * to another.
- *
+ *
* Be aware that you must specify a Regional endpoint for a cluster when you work with API cluster operations
* to get or update routing control states in Route 53 ARC. In addition, you must specify the US West (Oregon) Region
* for Route 53 ARC API calls. For example, use the parameter --region us-west-2
with AWS CLI commands.
* For more information, see
*
* Get and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.
This API guide includes information about the API operations for how to get and update routing control states
+ *
This API guide includes information about the API operations for how to get and update routing control states * in Route 53 ARC. To work with routing control in Route 53 ARC, you must first create the required components (clusters, control * panels, and routing controls) using the recovery cluster configuration API.
- *For more information about working with routing control in Route 53 ARC, see the following:
- *For more information about working with routing control in Route 53 ARC, see the following:
+ *Create clusters, control panels, and routing controls by using API operations. For more information, + *
Create clusters, control panels, and routing controls by using API operations. For more information, * see the Recovery Control Configuration API Reference Guide for Amazon Route 53 Application Recovery Controller.
- *Learn about the components in recovery control, including clusters, * routing controls, and control panels, and how to work with Route 53 ARC in the Amazon Web Services console. For more @@ -134,14 +134,14 @@ export interface Route53RecoveryCluster { * Recovery control components in the Amazon Route 53 Application Recovery Controller Developer Guide.
*Route 53 ARC also provides readiness checks that continually audit resources to help make sure that your + *
Route 53 ARC also provides readiness checks that continually audit resources to help make sure that your * applications are scaled and ready to handle failover traffic. For more information about * the related API operations, see the Recovery Readiness API Reference Guide for Amazon Route 53 Application Recovery Controller.
- *For more information about creating resilient applications and preparing for + *
For more information about creating resilient applications and preparing for * recovery readiness with Route 53 ARC, see the Amazon Route 53 Application Recovery Controller Developer Guide.
- *Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller.
- *With Route 53 ARC, you can use routing control with extreme reliability to + *
With Route 53 ARC, you can use routing control with extreme reliability to * recover applications by rerouting traffic across * Availability Zones or Amazon Web Services Regions. Routing controls are simple on/off switches hosted * on a highly available cluster in Route 53 ARC. A cluster provides a set of five redundant Regional endpoints against which you * can run API calls to get or update the state of routing controls. To implement failover, you set * one routing control On and another one Off, to reroute traffic from one Availability Zone or Amazon Web Services Region * to another.
- *
+ *
* Be aware that you must specify a Regional endpoint for a cluster when you work with API cluster operations
* to get or update routing control states in Route 53 ARC. In addition, you must specify the US West (Oregon) Region
* for Route 53 ARC API calls. For example, use the parameter --region us-west-2
with AWS CLI commands.
* For more information, see
*
* Get and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.
This API guide includes information about the API operations for how to get and update routing control states + *
This API guide includes information about the API operations for how to get and update routing control states * in Route 53 ARC. To work with routing control in Route 53 ARC, you must first create the required components (clusters, control * panels, and routing controls) using the recovery cluster configuration API.
- *For more information about working with routing control in Route 53 ARC, see the following:
- *For more information about working with routing control in Route 53 ARC, see the following:
+ *Create clusters, control panels, and routing controls by using API operations. For more information, + *
Create clusters, control panels, and routing controls by using API operations. For more information, * see the Recovery Control Configuration API Reference Guide for Amazon Route 53 Application Recovery Controller.
- *Learn about the components in recovery control, including clusters, * routing controls, and control panels, and how to work with Route 53 ARC in the Amazon Web Services console. For more
@@ -296,14 +296,14 @@ export interface Route53RecoveryClusterClientResolvedConfig extends Route53Recov
 * Recovery control components in the Amazon Route 53 Application Recovery Controller Developer Guide.
*Route 53 ARC also provides readiness checks that continually audit resources to help make sure that your
+ *
Route 53 ARC also provides readiness checks that continually audit resources to help make sure that your * applications are scaled and ready to handle failover traffic. For more information about * the related API operations, see the Recovery Readiness API Reference Guide for Amazon Route 53 Application Recovery Controller.
- *For more information about creating resilient applications and preparing for
+ *
For more information about creating resilient applications and preparing for * recovery readiness with Route 53 ARC, see the Amazon Route 53 Application Recovery Controller Developer Guide.
- *Get the state for a routing control. A routing control is a simple on/off switch that you * can use to route traffic to cells. When a routing control state is On, traffic flows to a cell. When * the state is Off, traffic does not flow.
- *Before you can create a routing control, you must first create a cluster, and then host the control
+ *
Before you can create a routing control, you must first create a cluster, and then host the control * in a control panel on the cluster. For more information, see * Create routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. * You access one of the endpoints for the cluster to get or update the routing control state to * redirect traffic for your application.
- *
+ *
* You must specify Regional endpoints when you work with API cluster operations * to get or update routing control states in Route 53 ARC. *
- *To see a code example for getting a routing control state, including accessing Regional cluster endpoints
+ *
To see a code example for getting a routing control state, including accessing Regional cluster endpoints * in sequence, see API examples * in the Amazon Route 53 Application Recovery Controller Developer Guide.
- *Learn more about working with routing controls in the following topics in the
+ *
Learn more about working with routing controls in the following topics in the * Amazon Route 53 Application Recovery Controller Developer Guide:
- *A routing control is a simple on/off switch in Route 53 ARC that you
+ *
A routing control is a simple on/off switch in Route 53 ARC that you * can use to route traffic to cells. When a routing control state is On, traffic flows to a cell. When * the state is Off, traffic does not flow.
- *Before you can create a routing control, you must first create a cluster, and then host the control
+ *
Before you can create a routing control, you must first create a cluster, and then host the control * in a control panel on the cluster. For more information, see * Create routing control structures in the Amazon Route 53 Application Recovery Controller Developer Guide. * You access one of the endpoints for the cluster to get or update the routing control state to * redirect traffic for your application.
- *
+ *
* You must specify Regional endpoints when you work with API cluster operations * to use this API operation to list routing controls in Route 53 ARC. *
- *Learn more about working with routing controls in the following topics in the
+ *
Learn more about working with routing controls in the following topics in the * Amazon Route 53 Application Recovery Controller Developer Guide:
- *
*
@@ -92,7 +92,7 @@ export interface ListRoutingControlsCommandOutput extends ListRoutingControlsRes
* // ControlPanelName: "STRING_VALUE",
* // RoutingControlArn: "STRING_VALUE",
* // RoutingControlName: "STRING_VALUE",
- * // RoutingControlState: "STRING_VALUE",
+ * // RoutingControlState: "On" || "Off",
* // },
* // ],
* // NextToken: "STRING_VALUE",
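Because every data-plane call must target one of the cluster's Regional endpoints, a common pattern (sketched here with hypothetical endpoint URLs) is to try each endpoint in sequence until one answers:

import {
  Route53RecoveryClusterClient,
  GetRoutingControlStateCommand,
} from "@aws-sdk/client-route53-recovery-cluster";

// Hypothetical endpoints; a real cluster exposes five Regional endpoints.
const clusterEndpoints = [
  { endpoint: "https://host-aaaa.us-east-1.example.com", region: "us-east-1" },
  { endpoint: "https://host-bbbb.us-west-2.example.com", region: "us-west-2" },
];

async function getState(routingControlArn: string) {
  for (const { endpoint, region } of clusterEndpoints) {
    try {
      const client = new Route53RecoveryClusterClient({ endpoint, region });
      return await client.send(
        new GetRoutingControlStateCommand({ RoutingControlArn: routingControlArn })
      );
    } catch (err) {
      console.warn(`endpoint ${endpoint} failed, trying the next one`, err);
    }
  }
  throw new Error("all cluster endpoints failed");
}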
diff --git a/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStateCommand.ts b/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStateCommand.ts
index 1280ad32daeb7..663f6405427b1 100644
--- a/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStateCommand.ts
+++ b/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStateCommand.ts
@@ -43,25 +43,25 @@ export interface UpdateRoutingControlStateCommandOutput extends UpdateRoutingCon
* Set the state of the routing control to reroute traffic. You can set the value to be On or
* Off. When the state is On, traffic flows to a cell. When the state is Off, traffic does not
* flow. With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing
+ * With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing
* control state updates that help prevent unexpected outcomes, like fail open traffic routing. However,
* there are scenarios when you might want to bypass the routing control safeguards that are enforced with
* safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery,
* and one or more safety rules might be unexpectedly preventing you from updating a routing control state to
* reroute traffic. In a "break glass" scenario like this, you can override one or more safety rules to change
* a routing control state and fail over your application. The The
+ *
* You must specify Regional endpoints when you work with API cluster operations
* to get or update routing control states in Route 53 ARC.
* To see a code example for getting a routing control state, including accessing Regional cluster endpoints
+ * To see a code example for getting a routing control state, including accessing Regional cluster endpoints
* in sequence, see API examples
* in the Amazon Route 53 Application Recovery Controller Developer Guide.
*
@@ -82,7 +82,7 @@ export interface UpdateRoutingControlStateCommandOutput extends UpdateRoutingCon
* const client = new Route53RecoveryClusterClient(config);
* const input = { // UpdateRoutingControlStateRequest
* RoutingControlArn: "STRING_VALUE", // required
- * RoutingControlState: "STRING_VALUE", // required
+ * RoutingControlState: "On" || "Off", // required
* SafetyRulesToOverride: [ // Arns
* "STRING_VALUE",
* ],
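For the "break glass" path described above, a sketch of a state update that overrides a safety rule (both ARNs are hypothetical):

import {
  Route53RecoveryClusterClient,
  UpdateRoutingControlStateCommand,
} from "@aws-sdk/client-route53-recovery-cluster";

// Point the client at one of the cluster's Regional endpoints (hypothetical URL).
const client = new Route53RecoveryClusterClient({
  endpoint: "https://host-aaaa.us-west-2.example.com",
  region: "us-west-2",
});

await client.send(
  new UpdateRoutingControlStateCommand({
    RoutingControlArn:
      "arn:aws:route53-recovery-control::123456789012:controlpanel/abc/routingcontrol/def", // hypothetical
    RoutingControlState: "Off",
    // Bypass specific safety rules during a disaster-recovery failover.
    SafetyRulesToOverride: [
      "arn:aws:route53-recovery-control::123456789012:controlpanel/abc/safetyrule/ghi", // hypothetical
    ],
  })
);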
diff --git a/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStatesCommand.ts b/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStatesCommand.ts
index 1644e908128c9..0b1cb45df61d3 100644
--- a/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStatesCommand.ts
+++ b/clients/client-route53-recovery-cluster/src/commands/UpdateRoutingControlStatesCommand.ts
@@ -43,14 +43,14 @@ export interface UpdateRoutingControlStatesCommandOutput extends UpdateRoutingCo
* Set multiple routing control states. You can set the value for each state to be On or Off.
* When the state is On, traffic flows to a cell. When it's Off, traffic does not
* flow. With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing
+ * With Route 53 ARC, you can add safety rules for routing controls, which are safeguards for routing
* control state updates that help prevent unexpected outcomes, like fail open traffic routing. However,
* there are scenarios when you might want to bypass the routing control safeguards that are enforced with
* safety rules that you've configured. For example, you might want to fail over quickly for disaster recovery,
* and one or more safety rules might be unexpectedly preventing you from updating a routing control state to
* reroute traffic. In a "break glass" scenario like this, you can override one or more safety rules to change
* a routing control state and fail over your application. The The To see a code example for getting a routing control state, including accessing Regional cluster endpoints
+ * To see a code example for getting a routing control state, including accessing Regional cluster endpoints
* in sequence, see API examples
* in the Amazon Route 53 Application Recovery Controller Developer Guide.
*
@@ -84,7 +84,7 @@ export interface UpdateRoutingControlStatesCommandOutput extends UpdateRoutingCo
* UpdateRoutingControlStateEntries: [ // UpdateRoutingControlStateEntries // required
* { // UpdateRoutingControlStateEntry
* RoutingControlArn: "STRING_VALUE", // required
- * RoutingControlState: "STRING_VALUE", // required
+ * RoutingControlState: "On" || "Off", // required
* },
* ],
* SafetyRulesToOverride: [ // Arns
diff --git a/clients/client-route53-recovery-cluster/src/endpoint/ruleset.ts b/clients/client-route53-recovery-cluster/src/endpoint/ruleset.ts
index 1d2f0e2eaf344..d1b6f13731a7a 100644
--- a/clients/client-route53-recovery-cluster/src/endpoint/ruleset.ts
+++ b/clients/client-route53-recovery-cluster/src/endpoint/ruleset.ts
@@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/route53-recovery-cluster.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://route53-recovery-cluster-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{endpoint:{url:"https://route53-recovery-cluster-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://route53-recovery-cluster.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://route53-recovery-cluster.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://route53-recovery-cluster-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://route53-recovery-cluster-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://route53-recovery-cluster.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://route53-recovery-cluster.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
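The minified ruleset above resolves an endpoint from four parameters (Region, UseDualStack, UseFIPS, Endpoint). In a client, those parameters come from standard configuration; a small sketch, assuming the SDK v3 option names useFipsEndpoint and useDualstackEndpoint:

import { Route53RecoveryClusterClient } from "@aws-sdk/client-route53-recovery-cluster";

// Enabling both flags selects the
// "https://route53-recovery-cluster-fips.{Region}.{dualStackDnsSuffix}" branch;
// combining a custom endpoint with either flag hits the "Invalid Configuration" error rules.
const client = new Route53RecoveryClusterClient({
  region: "us-west-2",
  useFipsEndpoint: true,      // maps to the UseFIPS ruleset parameter
  useDualstackEndpoint: true, // maps to the UseDualStack ruleset parameter
});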
diff --git a/clients/client-route53-recovery-cluster/src/index.ts b/clients/client-route53-recovery-cluster/src/index.ts
index d814baa574652..c33c6ed8f94b2 100644
--- a/clients/client-route53-recovery-cluster/src/index.ts
+++ b/clients/client-route53-recovery-cluster/src/index.ts
@@ -2,29 +2,29 @@
/* eslint-disable */
/**
* Welcome to the Routing Control (Recovery Cluster) API Reference Guide for Amazon Route 53 Application Recovery Controller. With Route 53 ARC, you can use routing control with extreme reliability to
+ * With Route 53 ARC, you can use routing control with extreme reliability to
* recover applications by rerouting traffic across
* Availability Zones or Amazon Web Services Regions. Routing controls are simple on/off switches hosted
* on a highly available cluster in Route 53 ARC. A cluster provides a set of five redundant Regional endpoints against which you
* can run API calls to get or update the state of routing controls. To implement failover, you set
* one routing control On and another one Off, to reroute traffic from one Availability Zone or Amazon Web Services Region
* to another.
+ *
* Be aware that you must specify a Regional endpoint for a cluster when you work with API cluster operations
* to get or update routing control states in Route 53 ARC. In addition, you must specify the US West (Oregon) Region
 * for Route 53 ARC API calls. For example, use the parameter --region us-west-2
 * with AWS CLI commands. For more information, see
 * Get and update routing control states using the API in the Amazon Route 53 Application Recovery Controller Developer Guide.
 * This API guide includes information about the API operations for how to get and update routing control states
+ * This API guide includes information about the API operations for how to get and update routing control states
 * in Route 53 ARC. To work with routing control in Route 53 ARC, you must first create the required components (clusters, control
 * panels, and routing controls) using the recovery cluster configuration API.
 * For more information about working with routing control in Route 53 ARC, see the following:
+ * For more information about working with routing control in Route 53 ARC, see the following:
 * Create clusters, control panels, and routing controls by using API operations. For more information,
+ * Create clusters, control panels, and routing controls by using API operations. For more information,
 * see the Recovery Control Configuration API Reference Guide for Amazon Route 53 Application Recovery Controller.
 * Learn about the components in recovery control, including clusters,
 * routing controls, and control panels, and how to work with Route 53 ARC in the Amazon Web Services console. For more
@@ -32,14 +32,14 @@
 * Recovery control components
 * Route 53 ARC also provides readiness checks that continually audit resources to help make sure that your
+ * Route 53 ARC also provides readiness checks that continually audit resources to help make sure that your
 * applications are scaled and ready to handle failover traffic. For more information about
 * the related API operations, see the Recovery Readiness API Reference Guide for Amazon Route 53 Application Recovery Controller.
- * For more information about creating resilient applications and preparing for
+ * For more information about creating resilient applications and preparing for
 * recovery readiness with Route 53 ARC, see the Amazon Route 53 Application Recovery Controller Developer Guide.
 * The SafetyRulesToOverride property enables you to override one or more safety rules and
+ * The SafetyRulesToOverride property enables you to override one or more safety rules and
 * update routing control states. For more information, see
 * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.
 * The SafetyRulesToOverride property enables you to override one or more safety rules and
+ * The SafetyRulesToOverride property enables you to override one or more safety rules and
 * update routing control states. For more information, see
 * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.
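A sketch of "accessing Regional cluster endpoints in sequence" as described above: try each cluster endpoint until one responds. The endpoint list, per-endpoint Regions, and the routing control ARN are placeholders:

import {
  Route53RecoveryClusterClient,
  GetRoutingControlStateCommand,
} from "@aws-sdk/client-route53-recovery-cluster";

async function getStateFromAnyEndpoint(
  endpoints: { url: string; region: string }[], // the cluster's five Regional endpoints
  routingControlArn: string
) {
  for (const { url, region } of endpoints) {
    try {
      const client = new Route53RecoveryClusterClient({ region, endpoint: url });
      const { RoutingControlState } = await client.send(
        new GetRoutingControlStateCommand({ RoutingControlArn: routingControlArn })
      );
      return RoutingControlState; // "On" or "Off"
    } catch {
      // This endpoint did not answer; fall through to the next one.
    }
  }
  throw new Error("No cluster endpoint responded");
}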
- * The Amazon Resource Names (ARNs) for the safety rules that you want to override when you're updating the state of
 * a routing control. You can override one safety rule or multiple safety rules by including one or more ARNs, separated
 * by commas.
- * For more information, see
+ * For more information, see
* Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide. The Amazon Resource Names (ARNs) for the safety rules that you want to override when you're updating routing
* control states. You can override one safety rule or multiple safety rules by including one or more ARNs, separated
* by commas. For more information, see
+ * For more information, see
 * Override safety rules to reroute traffic in the Amazon Route 53 Application Recovery Controller Developer Guide.
 * You can associate only one RoleArn with your subscription. If you submit an AssociateDRTRole
 * request for an account that already has an associated role, the new RoleArn will replace the existing RoleArn.
 * Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy
 * managed policy to the role that you'll specify in the request. You can access this policy in the IAM console at
 * AWSShieldDRTAccessPolicy. For more information see Adding and removing IAM identity permissions. The role must also trust the service principal
 * drt.shield.amazonaws.com. For more information, see IAM JSON policy elements: Principal.
 * The SRT will have access only to your WAF and Shield resources. By submitting this request, you authorize the SRT
 * to inspect your WAF and Shield configuration and create and update WAF rules and web ACLs on your behalf.
 * The SRT takes these actions only if explicitly authorized by you.
 * You must have the iam:PassRole permission to make an AssociateDRTRole request. For more information, see
 * Granting a user permissions to pass a role to an Amazon Web Services service.
 * To use the services of the SRT and make an AssociateDRTRole request, you must be subscribed to the Business Support plan or the Enterprise Support plan.
 * Specifies that Shield Advanced should configure its WAF rules with the WAF Block action.
 * This is only used in the context of the ResponseAction setting.
 * JSON specification: "Block": {}
 * Specifies that Shield Advanced should configure its WAF rules with the WAF Count action.
 * This is only used in the context of the ResponseAction setting.
 * JSON specification: "Count": {}
 * The Amazon Resource Name (ARN) of the role the SRT will use to access your Amazon Web Services account.
 * Prior to making the AssociateDRTRole request, you must attach the AWSShieldDRTAccessPolicy managed policy to this role.
 * For more information see Attaching and Detaching IAM Policies.
 * The attack type. Valid values: UDP_TRAFFIC
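A minimal sketch of the AssociateDRTRole request described above, assuming the role already carries the AWSShieldDRTAccessPolicy managed policy and trusts drt.shield.amazonaws.com (the role ARN is a placeholder; Node ESM top-level await):

import { ShieldClient, AssociateDRTRoleCommand } from "@aws-sdk/client-shield";

// Shield is a global service; its API endpoint is in us-east-1.
const shield = new ShieldClient({ region: "us-east-1" });

await shield.send(
  new AssociateDRTRoleCommand({
    RoleArn: "arn:aws:iam::111122223333:role/SRTAccessRole", // placeholder
  })
);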
 * When you request a list of objects from Shield Advanced, if the response does not include all of the remaining available objects,
 * Shield Advanced includes a NextToken value in the response. You can retrieve the next batch of objects by requesting the list again and
 * providing the token that was returned by the prior call in your request.
 * You can indicate the maximum number of objects that you want Shield Advanced to return for a single call with the MaxResults
+ * You can indicate the maximum number of objects that you want Shield Advanced to return for a single call with the MaxResults
 * setting. Shield Advanced will not return more than MaxResults objects, but may return fewer, even if more objects are still available.
 * Whenever more objects remain that Shield Advanced has not yet returned to you, the response will include a NextToken value.
 * When you request a list of objects from Shield Advanced, if the response does not include all of the remaining available objects,
 * Shield Advanced includes a NextToken value in the response. You can retrieve the next batch of objects by requesting the list again and
 * providing the token that was returned by the prior call in your request.
 * You can indicate the maximum number of objects that you want Shield Advanced to return for a single call with the MaxResults
+ * You can indicate the maximum number of objects that you want Shield Advanced to return for a single call with the MaxResults
 * setting. Shield Advanced will not return more than MaxResults objects, but may return fewer, even if more objects are still available.
 * Whenever more objects remain that Shield Advanced has not yet returned to you, the response will include a NextToken value.
 * On your first call to a list operation, leave this setting empty.
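A sketch of the NextToken/MaxResults contract described above, using the SDK's generated paginator for one of the Shield list operations (Node ESM top-level await):

import { ShieldClient, paginateListProtections } from "@aws-sdk/client-shield";

const client = new ShieldClient({ region: "us-east-1" });

// The paginator resubmits the request with the prior NextToken until Shield Advanced
// stops returning one; pageSize maps to the MaxResults setting.
for await (const page of paginateListProtections({ client, pageSize: 20 }, {})) {
  for (const protection of page.Protections ?? []) {
    console.log(protection.Id, protection.Name, protection.ResourceArn);
  }
}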
For general information about IAM Identity Center, see What is * IAM Identity Center? in the IAM Identity Center User Guide.
*/ diff --git a/clients/client-sso-oidc/src/SSOOIDCClient.ts b/clients/client-sso-oidc/src/SSOOIDCClient.ts index 240f56e1d8358..225d8638bce8f 100644 --- a/clients/client-sso-oidc/src/SSOOIDCClient.ts +++ b/clients/client-sso-oidc/src/SSOOIDCClient.ts @@ -275,7 +275,6 @@ export interface SSOOIDCClientResolvedConfig extends SSOOIDCClientResolvedConfig * Guide. * *For general information about IAM Identity Center, see What is * IAM Identity Center? in the IAM Identity Center User Guide.
*/ diff --git a/clients/client-sso-oidc/src/endpoint/ruleset.ts b/clients/client-sso-oidc/src/endpoint/ruleset.ts index 6a7e4a8755edc..1b93329135f0d 100644 --- a/clients/client-sso-oidc/src/endpoint/ruleset.ts +++ b/clients/client-sso-oidc/src/endpoint/ruleset.ts @@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/sso-oidc.json */ -const p="required", -q="fn", -r="argv", -s="ref"; -const a="PartitionResult", +const q="required", +r="fn", +s="argv", +t="ref"; +const a="isSet", b="tree", c="error", d="endpoint", -e={[p]:false,"type":"String"}, -f={[p]:true,"default":false,"type":"Boolean"}, -g={[s]:"Endpoint"}, -h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]}, -i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]}, -j={}, -k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]}, -l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]}, -m=[g], -n=[h], -o=[i]; -const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://oidc-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://oidc-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://oidc.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://oidc.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]}; +e="PartitionResult", +f={[q]:false,"type":"String"}, +g={[q]:true,"default":false,"type":"Boolean"}, +h={[t]:"Endpoint"}, +i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]}, +j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]}, +k={}, +l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]}, +m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]}, +n=[i], +o=[j], +p=[{[t]:"Region"}]; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not 
supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://oidc-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://oidc-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://oidc.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://oidc.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-sso-oidc/src/index.ts b/clients/client-sso-oidc/src/index.ts index 903620480b81d..35e1ecc209235 100644 --- a/clients/client-sso-oidc/src/index.ts +++ b/clients/client-sso-oidc/src/index.ts @@ -38,7 +38,6 @@ * Guide. * *For general information about IAM Identity Center, see What is * IAM Identity Center? in the IAM Identity Center User Guide.
* diff --git a/clients/client-sso-oidc/src/models/models_0.ts b/clients/client-sso-oidc/src/models/models_0.ts index 7598b0ca13591..c88f7184fe819 100644 --- a/clients/client-sso-oidc/src/models/models_0.ts +++ b/clients/client-sso-oidc/src/models/models_0.ts @@ -74,12 +74,10 @@ export interface CreateTokenRequest { * @public *Supports grant types for the authorization code, refresh token, and device code request. * For device code requests, specify the following value:
- * *
* urn:ietf:params:oauth:grant-type:device_code
*
*
For information about how to obtain the device code, see the StartDeviceAuthorization topic.
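A sketch of the device-code grant described above: register a client, start device authorization, then exchange the device code for tokens. The startUrl is a placeholder, and a real client should poll CreateToken at the interval returned by StartDeviceAuthorization (Node ESM top-level await):

import {
  SSOOIDCClient,
  RegisterClientCommand,
  StartDeviceAuthorizationCommand,
  CreateTokenCommand,
} from "@aws-sdk/client-sso-oidc";

const oidc = new SSOOIDCClient({ region: "us-east-1" });

const registration = await oidc.send(
  new RegisterClientCommand({ clientName: "example-cli", clientType: "public" })
);

const auth = await oidc.send(
  new StartDeviceAuthorizationCommand({
    clientId: registration.clientId!,
    clientSecret: registration.clientSecret!,
    startUrl: "https://my-sso-portal.awsapps.com/start", // placeholder
  })
);
console.log(`Approve this device at ${auth.verificationUriComplete}`);

// After the user approves, redeem the device code for an access token.
const token = await oidc.send(
  new CreateTokenCommand({
    clientId: registration.clientId!,
    clientSecret: registration.clientSecret!,
    grantType: "urn:ietf:params:oauth:grant-type:device_code",
    deviceCode: auth.deviceCode,
  })
);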
*/ grantType: string | undefined; diff --git a/clients/client-sso/README.md b/clients/client-sso/README.md index 66b1c7bb94380..8a37787655eba 100644 --- a/clients/client-sso/README.md +++ b/clients/client-sso/README.md @@ -9,16 +9,13 @@ AWS SDK for JavaScript SSO Client for Node.js, Browser and React Native.AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web service that makes it easy for you to assign user access to IAM Identity Center resources such as the AWS access portal. Users can get AWS account applications and roles assigned to them and get federated into the application.
-Although AWS Single Sign-On was renamed, the sso
and
identitystore
API namespaces will continue to retain their original name for
backward compatibility purposes. For more information, see IAM Identity Center rename.
This reference guide describes the IAM Identity Center Portal operations that you can call programmatically and includes detailed information on data types and errors.
-AWS provides SDKs that consist of libraries and sample code for various programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs provide a
diff --git a/clients/client-sso/src/SSO.ts b/clients/client-sso/src/SSO.ts
index 119c2167df54f..ce10b3801b27c 100644
--- a/clients/client-sso/src/SSO.ts
+++ b/clients/client-sso/src/SSO.ts
@@ -90,16 +90,13 @@ export interface SSO {
 *
AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web service that makes it easy for you to assign user access to * IAM Identity Center resources such as the AWS access portal. Users can get AWS account applications and roles * assigned to them and get federated into the application.
- * *Although AWS Single Sign-On was renamed, the sso
and
* identitystore
API namespaces will continue to retain their original name for
* backward compatibility purposes. For more information, see IAM Identity Center rename.
This reference guide describes the IAM Identity Center Portal operations that you can call * programmatically and includes detailed information on data types and errors.
- * AWS provides SDKs that consist of libraries and sample code for various programming
 * languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs provide a
diff --git a/clients/client-sso/src/SSOClient.ts b/clients/client-sso/src/SSOClient.ts
index add4b6fc399f6..e100c65fb07eb 100644
--- a/clients/client-sso/src/SSOClient.ts
+++ b/clients/client-sso/src/SSOClient.ts
@@ -241,16 +241,13 @@ export interface SSOClientResolvedConfig extends SSOClientResolvedConfigType {}
 *
AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web service that makes it easy for you to assign user access to * IAM Identity Center resources such as the AWS access portal. Users can get AWS account applications and roles * assigned to them and get federated into the application.
- * *Although AWS Single Sign-On was renamed, the sso
and
* identitystore
API namespaces will continue to retain their original name for
* backward compatibility purposes. For more information, see IAM Identity Center rename.
This reference guide describes the IAM Identity Center Portal operations that you can call * programmatically and includes detailed information on data types and errors.
- * AWS provides SDKs that consist of libraries and sample code for various programming
 * languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs provide a
diff --git a/clients/client-sso/src/commands/LogoutCommand.ts b/clients/client-sso/src/commands/LogoutCommand.ts
index 6b1ecb8f2106c..1c557f345d225 100644
--- a/clients/client-sso/src/commands/LogoutCommand.ts
+++ b/clients/client-sso/src/commands/LogoutCommand.ts
@@ -39,13 +39,11 @@ export interface LogoutCommandOutput extends __MetadataBearer {}
 *
Removes the locally stored SSO tokens from the client-side cache and sends an API call to * the IAM Identity Center service to invalidate the corresponding server-side IAM Identity Center sign in * session.
- * *If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM Identity Center sign in session is * used to obtain an IAM session, as specified in the corresponding IAM Identity Center permission set. * More specifically, IAM Identity Center assumes an IAM role in the target account on behalf of the user, * and the corresponding temporary AWS credentials are returned to the client.
- * *After user logout, any existing IAM role sessions that were created by using IAM Identity Center
* permission sets continue based on the duration configured in the permission set.
* For more information, see User
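A minimal sketch of the sign-out call described above; the access token value is a placeholder for the client-side cached token being invalidated (Node ESM top-level await):

import { SSOClient, LogoutCommand } from "@aws-sdk/client-sso";

const sso = new SSOClient({ region: "us-east-1" });
await sso.send(new LogoutCommand({ accessToken: "<cached-access-token>" })); // placeholder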
diff --git a/clients/client-sso/src/endpoint/ruleset.ts b/clients/client-sso/src/endpoint/ruleset.ts
index d0abf2f27ee41..91d0e6695cd27 100644
--- a/clients/client-sso/src/endpoint/ruleset.ts
+++ b/clients/client-sso/src/endpoint/ruleset.ts
@@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/sso.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://portal.sso-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://portal.sso-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://portal.sso.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://portal.sso.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://portal.sso-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://portal.sso-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://portal.sso.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://portal.sso.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
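Per the ruleset above, a caller-supplied Endpoint short-circuits Region-based resolution, and combining it with the FIPS or dual-stack flags raises the "Invalid Configuration" errors encoded in the rules. A small sketch with an assumed custom endpoint:

import { SSOClient } from "@aws-sdk/client-sso";

const client = new SSOClient({
  region: "us-east-1",
  endpoint: "https://portal.sso.us-east-1.amazonaws.com", // used as-is; no FIPS/dual-stack flags
});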
diff --git a/clients/client-sso/src/index.ts b/clients/client-sso/src/index.ts
index 8a75b52b99e4c..9cb218404f84a 100644
--- a/clients/client-sso/src/index.ts
+++ b/clients/client-sso/src/index.ts
@@ -4,16 +4,13 @@
* AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web service that makes it easy for you to assign user access to
* IAM Identity Center resources such as the AWS access portal. Users can get AWS account applications and roles
* assigned to them and get federated into the application. Although AWS Single Sign-On was renamed, the This reference guide describes the IAM Identity Center Portal operations that you can call
 * programmatically and includes detailed information on data types and errors. AWS provides SDKs that consist of libraries and sample code for various programming
* languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs provide a
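A minimal sketch of calling one of the portal operations described above with an IAM Identity Center access token (the token is a placeholder, e.g. obtained via the OIDC device-code flow sketched earlier; Node ESM top-level await):

import { SSOClient, ListAccountsCommand } from "@aws-sdk/client-sso";

const sso = new SSOClient({ region: "us-east-1" });
const { accountList } = await sso.send(
  new ListAccountsCommand({ accessToken: "<access-token>" }) // placeholder
);
accountList?.forEach((account) => console.log(account.accountId, account.accountName));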
diff --git a/clients/client-storage-gateway/README.md b/clients/client-storage-gateway/README.md
index 64c89e2e249f6..9372ff0d84ecc 100644
--- a/clients/client-storage-gateway/README.md
+++ b/clients/client-storage-gateway/README.md
@@ -12,10 +12,8 @@ AWS SDK for JavaScript StorageGateway Client for Node.js, Browser and React Nati
with cloud-based storage to provide seamless and secure integration between an
organization's on-premises IT environment and the Amazon Web Services storage
infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. Use the following links to get started using the Storage Gateway
Service API Reference:
@@ -43,7 +41,6 @@ endpoints and quotassso
and
* identitystore
API namespaces will continue to retain their original name for
* backward compatibility purposes. For more information, see IAM Identity Center rename.
Storage Gateway resource IDs are in uppercase. When you use these resource IDs
with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change
@@ -52,7 +49,6 @@ Gateway the ID for a volume might be vol-AA22BB012345DAF670
. When you use
this ID with the EC2 API, you must change it to vol-aa22bb012345daf670
.
Otherwise, the EC2 API might not behave as expected.
IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and
@@ -60,16 +56,12 @@
snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs.
-For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following:
-
arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG
.
A snapshot ID with the longer ID format looks like the following:
snap-78e226633445566ee
.
For more information, see Announcement: Heads-up – Longer Storage Gateway volume and snapshot IDs coming in 2016.
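A sketch of the ID-casing rule above: lowercase a Storage Gateway volume ID before handing it to the EC2 API (DescribeSnapshots is used as an assumed example; Node ESM top-level await):

import { EC2Client, DescribeSnapshotsCommand } from "@aws-sdk/client-ec2";

const gatewayVolumeId = "vol-AA22BB012345DAF670"; // as reported by Storage Gateway
const ec2 = new EC2Client({ region: "us-west-2" });

// EC2 expects the lowercase form, e.g. vol-aa22bb012345daf670.
await ec2.send(
  new DescribeSnapshotsCommand({
    Filters: [{ Name: "volume-id", Values: [gatewayVolumeId.toLowerCase()] }],
  })
);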
diff --git a/clients/client-storage-gateway/src/StorageGateway.ts b/clients/client-storage-gateway/src/StorageGateway.ts index 2a848bff409f9..e46ce2944ae4c 100644 --- a/clients/client-storage-gateway/src/StorageGateway.ts +++ b/clients/client-storage-gateway/src/StorageGateway.ts @@ -1913,15 +1913,12 @@ export interface StorageGateway { /** * @public *Storage Gateway is the service that connects an on-premises software appliance * with cloud-based storage to provide seamless and secure integration between an * organization's on-premises IT environment and the Amazon Web Services storage * infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery.
- * *Use the following links to get started using the Storage Gateway * Service API Reference:
- * *@@ -1949,7 +1946,6 @@ export interface StorageGateway { * and the endpoints available for use with Storage Gateway.
*Storage Gateway resource IDs are in uppercase. When you use these resource IDs
* with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change
@@ -1958,7 +1954,6 @@ export interface StorageGateway {
* this ID with the EC2 API, you must change it to vol-aa22bb012345daf670
.
* Otherwise, the EC2 API might not behave as expected.
IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway * volumes are changing to a longer format. Starting in December 2016, all new volumes and @@ -1966,16 +1961,12 @@ export interface StorageGateway { * be able to use these longer IDs so you can test your systems with the new format. For * more information, see Longer EC2 and * EBS resource IDs.
- * *For example, a volume Amazon Resource Name (ARN) with the longer volume ID format * looks like the following:
- * *
* arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG
.
A snapshot ID with the longer ID format looks like the following:
* snap-78e226633445566ee
.
For more information, see Announcement: * Heads-up – Longer Storage Gateway volume and snapshot IDs coming in * 2016.
diff --git a/clients/client-storage-gateway/src/StorageGatewayClient.ts b/clients/client-storage-gateway/src/StorageGatewayClient.ts index 2077ed0e8d49d..a11275ff60f7f 100644 --- a/clients/client-storage-gateway/src/StorageGatewayClient.ts +++ b/clients/client-storage-gateway/src/StorageGatewayClient.ts @@ -665,15 +665,12 @@ export interface StorageGatewayClientResolvedConfig extends StorageGatewayClient /** * @public *Storage Gateway is the service that connects an on-premises software appliance * with cloud-based storage to provide seamless and secure integration between an * organization's on-premises IT environment and the Amazon Web Services storage * infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery.
- * *Use the following links to get started using the Storage Gateway * Service API Reference:
- * *@@ -701,7 +698,6 @@ export interface StorageGatewayClientResolvedConfig extends StorageGatewayClient * and the endpoints available for use with Storage Gateway.
*Storage Gateway resource IDs are in uppercase. When you use these resource IDs
* with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change
@@ -710,7 +706,6 @@ export interface StorageGatewayClientResolvedConfig extends StorageGatewayClient
* this ID with the EC2 API, you must change it to vol-aa22bb012345daf670
.
* Otherwise, the EC2 API might not behave as expected.
IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway * volumes are changing to a longer format. Starting in December 2016, all new volumes and @@ -718,16 +713,12 @@ export interface StorageGatewayClientResolvedConfig extends StorageGatewayClient * be able to use these longer IDs so you can test your systems with the new format. For * more information, see Longer EC2 and * EBS resource IDs.
- * *For example, a volume Amazon Resource Name (ARN) with the longer volume ID format * looks like the following:
- * *
* arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG
.
A snapshot ID with the longer ID format looks like the following:
* snap-78e226633445566ee
.
For more information, see Announcement: * Heads-up – Longer Storage Gateway volume and snapshot IDs coming in * 2016.
diff --git a/clients/client-storage-gateway/src/commands/AddCacheCommand.ts b/clients/client-storage-gateway/src/commands/AddCacheCommand.ts index dd624992d49fc..f801ab64b0c5f 100644 --- a/clients/client-storage-gateway/src/commands/AddCacheCommand.ts +++ b/clients/client-storage-gateway/src/commands/AddCacheCommand.ts @@ -38,7 +38,6 @@ export interface AddCacheCommandOutput extends AddCacheOutput, __MetadataBearer * @public *Configures one or more gateway local disks as cache for a gateway. This operation is * only supported in the cached volume, tape, and file gateway type (see How Storage Gateway works (architecture).
- * *In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to * add cache, and one or more disk IDs that you want to configure as cache.
* @example diff --git a/clients/client-storage-gateway/src/commands/AddTagsToResourceCommand.ts b/clients/client-storage-gateway/src/commands/AddTagsToResourceCommand.ts index 3d2519bdb4f66..3908ae0c320a1 100644 --- a/clients/client-storage-gateway/src/commands/AddTagsToResourceCommand.ts +++ b/clients/client-storage-gateway/src/commands/AddTagsToResourceCommand.ts @@ -40,7 +40,6 @@ export interface AddTagsToResourceCommandOutput extends AddTagsToResourceOutput, * resources, which you can use to categorize these resources. For example, you can categorize * resources by purpose, owner, environment, or team. Each tag consists of a key and a value, * which you define. You can add tags to the following Storage Gateway resources: - * *Storage gateways of all types
@@ -58,7 +57,6 @@ export interface AddTagsToResourceCommandOutput extends AddTagsToResourceOutput, *File System associations
*You can create a maximum of 50 tags for each resource. Virtual tapes and storage volumes * that are recovered to a new gateway maintain their tags.
* @example diff --git a/clients/client-storage-gateway/src/commands/AddUploadBufferCommand.ts b/clients/client-storage-gateway/src/commands/AddUploadBufferCommand.ts index 11a21d38af6b0..f78c47899b785 100644 --- a/clients/client-storage-gateway/src/commands/AddUploadBufferCommand.ts +++ b/clients/client-storage-gateway/src/commands/AddUploadBufferCommand.ts @@ -39,7 +39,6 @@ export interface AddUploadBufferCommandOutput extends AddUploadBufferOutput, __M *Configures one or more gateway local disks as upload buffer for a specified gateway. * This operation is supported for the stored volume, cached volume, and tape gateway * types.
- * *In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to * add upload buffer, and one or more disk IDs that you want to configure as upload * buffer.
diff --git a/clients/client-storage-gateway/src/commands/AddWorkingStorageCommand.ts b/clients/client-storage-gateway/src/commands/AddWorkingStorageCommand.ts index df5f4f7e5aaa3..f6cf2c36d1a96 100644 --- a/clients/client-storage-gateway/src/commands/AddWorkingStorageCommand.ts +++ b/clients/client-storage-gateway/src/commands/AddWorkingStorageCommand.ts @@ -40,12 +40,10 @@ export interface AddWorkingStorageCommandOutput extends AddWorkingStorageOutput, * operation is only supported in the stored volume gateway type. This operation is deprecated * in cached volume API version 20120630. Use AddUploadBuffer * instead. - * *Working storage is also referred to as upload buffer. You can also use the AddUploadBuffer operation to add upload buffer to a stored volume * gateway.
*In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to * add working storage, and one or more disk IDs that you want to configure as working * storage.
diff --git a/clients/client-storage-gateway/src/commands/CreateCachediSCSIVolumeCommand.ts b/clients/client-storage-gateway/src/commands/CreateCachediSCSIVolumeCommand.ts index e9903e590ed95..0418d814702dd 100644 --- a/clients/client-storage-gateway/src/commands/CreateCachediSCSIVolumeCommand.ts +++ b/clients/client-storage-gateway/src/commands/CreateCachediSCSIVolumeCommand.ts @@ -38,18 +38,15 @@ export interface CreateCachediSCSIVolumeCommandOutput extends CreateCachediSCSIV * @public *Creates a cached volume on a specified cached volume gateway. This operation is only * supported in the cached volume gateway type.
- * *Cache storage must be allocated to the gateway before you can create a cached volume. * Use the AddCache operation to add cache storage to a gateway.
*In the request, you must specify the gateway, size of the volume in bytes, the iSCSI * target name, an IP address on which to expose the target, and a unique client token. In * response, the gateway creates the volume and returns information about it. This information * includes the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that * initiators can use to connect to the volume target.
- * *Optionally, you can provide the ARN for an existing volume as the
* SourceVolumeARN
for this cached volume, which creates an exact copy of the
* existing volume’s latest recovery point. The VolumeSizeInBytes
value must be
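Putting the request described above together, a minimal sketch of creating a cached volume; the gateway ARN, target name, network interface IP, and client token are placeholders (Node ESM top-level await):

import {
  StorageGatewayClient,
  CreateCachediSCSIVolumeCommand,
} from "@aws-sdk/client-storage-gateway";

const sgw = new StorageGatewayClient({ region: "us-west-2" });

const { VolumeARN, TargetARN } = await sgw.send(
  new CreateCachediSCSIVolumeCommand({
    GatewayARN: "arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B", // placeholder
    VolumeSizeInBytes: 150 * 1024 * 1024 * 1024, // 150 GiB
    TargetName: "my-volume-target",              // placeholder iSCSI target name
    NetworkInterfaceId: "10.0.0.15",             // IP on which the gateway exposes the target
    ClientToken: "unique-client-token-123",      // placeholder idempotency token
  })
);
console.log(VolumeARN, TargetARN);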
diff --git a/clients/client-storage-gateway/src/commands/CreateNFSFileShareCommand.ts b/clients/client-storage-gateway/src/commands/CreateNFSFileShareCommand.ts
index 6ab1a88a891d2..cb009f385dc8d 100644
--- a/clients/client-storage-gateway/src/commands/CreateNFSFileShareCommand.ts
+++ b/clients/client-storage-gateway/src/commands/CreateNFSFileShareCommand.ts
@@ -40,7 +40,6 @@ export interface CreateNFSFileShareCommandOutput extends CreateNFSFileShareOutpu
* Storage Gateway, a file share is a file system mount point backed by Amazon S3
* cloud storage. Storage Gateway exposes file shares using an NFS interface. This operation
* is only supported for S3 File Gateways.
S3 File gateway requires Security Token Service (Amazon Web Services STS) to be * activated to enable you to create a file share. Make sure Amazon Web Services STS is @@ -49,7 +48,6 @@ export interface CreateNFSFileShareCommandOutput extends CreateNFSFileShareOutpu * it. For information about how to activate Amazon Web Services STS, see Activating and * deactivating Amazon Web Services STS in an Amazon Web Services Region in the * Identity and Access Management User Guide.
- * *S3 File Gateways do not support creating hard or symbolic links on a file * share.
*S3 File Gateways require Security Token Service (Amazon Web Services STS) to be * activated to enable you to create a file share. Make sure that Amazon Web Services STS @@ -49,7 +48,6 @@ export interface CreateSMBFileShareCommandOutput extends CreateSMBFileShareOutpu * it. For information about how to activate Amazon Web Services STS, see Activating and * deactivating Amazon Web Services STS in an Amazon Web Services Region in the * Identity and Access Management User Guide.
- * *File gateways don't support creating hard or symbolic links on a file * share.
*Initiates a snapshot of a volume.
- * *Storage Gateway provides the ability to back up point-in-time snapshots of your * data to Amazon Simple Storage (Amazon S3) for durable off-site recovery, and also * import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute * Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. * This API enables you to take an ad hoc snapshot. For more information, see Editing a * snapshot schedule.
- * *In the CreateSnapshot
request, you identify the volume by providing its
* Amazon Resource Name (ARN). You must also provide description for the snapshot. When
* Storage Gateway takes the snapshot of specified volume, the snapshot and
@@ -52,14 +50,12 @@ export interface CreateSnapshotCommandOutput extends CreateSnapshotOutput, __Met
* returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or
* later use it when you want to create a volume from a snapshot. This operation is only
* supported in stored and cached volume gateway type.
To list or delete a snapshot, you must use the Amazon EC2 API. For more information, * see DescribeSnapshots * or DeleteSnapshot in the Amazon Elastic Compute Cloud API * Reference.
*Volume and snapshot IDs are changing to a longer length ID format. For more * information, see the important note on the Welcome page.
diff --git a/clients/client-storage-gateway/src/commands/CreateSnapshotFromVolumeRecoveryPointCommand.ts b/clients/client-storage-gateway/src/commands/CreateSnapshotFromVolumeRecoveryPointCommand.ts index 556a2fa940df9..9502f0853b151 100644 --- a/clients/client-storage-gateway/src/commands/CreateSnapshotFromVolumeRecoveryPointCommand.ts +++ b/clients/client-storage-gateway/src/commands/CreateSnapshotFromVolumeRecoveryPointCommand.ts @@ -46,11 +46,9 @@ export interface CreateSnapshotFromVolumeRecoveryPointCommandOutput * @public *Initiates a snapshot of a gateway from a volume recovery point. This operation is only * supported in the cached volume gateway type.
- * *A volume recovery point is a point in time at which all data of the volume is consistent * and from which you can create a snapshot. To get a list of volume recovery point for cached * volume gateway, use ListVolumeRecoveryPoints.
- * *In the CreateSnapshotFromVolumeRecoveryPoint
request, you identify the
* volume by providing its Amazon Resource Name (ARN). You must also provide a description for
* the snapshot. When the gateway takes a snapshot of the specified volume, the snapshot and
@@ -58,7 +56,6 @@ export interface CreateSnapshotFromVolumeRecoveryPointCommandOutput
* In response, the gateway returns
* you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use
* it when you want to create a volume from a snapshot.
To list or delete a snapshot, you must use the Amazon EC2 API. For more information, * see DescribeSnapshots diff --git a/clients/client-storage-gateway/src/commands/CreateStorediSCSIVolumeCommand.ts b/clients/client-storage-gateway/src/commands/CreateStorediSCSIVolumeCommand.ts index 62a2a05d6aabb..e858c4566c968 100644 --- a/clients/client-storage-gateway/src/commands/CreateStorediSCSIVolumeCommand.ts +++ b/clients/client-storage-gateway/src/commands/CreateStorediSCSIVolumeCommand.ts @@ -38,12 +38,10 @@ export interface CreateStorediSCSIVolumeCommandOutput extends CreateStorediSCSIV * @public *
Creates a volume on a specified gateway. This operation is only supported in the stored * volume gateway type.
- * *The size of the volume to create is inferred from the disk size. You can choose to * preserve existing data on the disk, create volume from an existing snapshot, or create an * empty volume. If you choose to create an empty gateway volume, then any existing data on * the disk is erased.
- * *In the request, you must specify the gateway and the disk information on which you are * creating the volume. In response, the gateway creates the volume and returns volume * information such as the volume Amazon Resource Name (ARN), its size, and the iSCSI target diff --git a/clients/client-storage-gateway/src/commands/CreateTapePoolCommand.ts b/clients/client-storage-gateway/src/commands/CreateTapePoolCommand.ts index 17c124448805c..3f4c0e14ae705 100644 --- a/clients/client-storage-gateway/src/commands/CreateTapePoolCommand.ts +++ b/clients/client-storage-gateway/src/commands/CreateTapePoolCommand.ts @@ -46,8 +46,8 @@ export interface CreateTapePoolCommandOutput extends CreateTapePoolOutput, __Met * const client = new StorageGatewayClient(config); * const input = { // CreateTapePoolInput * PoolName: "STRING_VALUE", // required - * StorageClass: "STRING_VALUE", // required - * RetentionLockType: "STRING_VALUE", + * StorageClass: "DEEP_ARCHIVE" || "GLACIER", // required + * RetentionLockType: "COMPLIANCE" || "GOVERNANCE" || "NONE", * RetentionLockTimeInDays: Number("int"), * Tags: [ // Tags * { // Tag diff --git a/clients/client-storage-gateway/src/commands/CreateTapeWithBarcodeCommand.ts b/clients/client-storage-gateway/src/commands/CreateTapeWithBarcodeCommand.ts index d2fed07e599b7..4d9133d5dce02 100644 --- a/clients/client-storage-gateway/src/commands/CreateTapeWithBarcodeCommand.ts +++ b/clients/client-storage-gateway/src/commands/CreateTapeWithBarcodeCommand.ts @@ -40,7 +40,6 @@ export interface CreateTapeWithBarcodeCommandOutput extends CreateTapeWithBarcod * then archive the tape. A barcode is unique and cannot be reused if it has already been used * on a tape. This applies to barcodes used on deleted tapes. This operation is only supported * in the tape gateway type.
- * *Cache storage must be allocated to the gateway before you can create a virtual tape. * Use the AddCache operation to add cache storage to a gateway.
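A sketch using the CreateTapePool enum values surfaced in the updated example above (the pool name is a placeholder; Node ESM top-level await):

import { StorageGatewayClient, CreateTapePoolCommand } from "@aws-sdk/client-storage-gateway";

const sgw = new StorageGatewayClient({ region: "us-west-2" });
const { PoolARN } = await sgw.send(
  new CreateTapePoolCommand({
    PoolName: "compliance-pool",     // placeholder
    StorageClass: "DEEP_ARCHIVE",    // or "GLACIER"
    RetentionLockType: "GOVERNANCE", // or "COMPLIANCE" / "NONE"
    RetentionLockTimeInDays: 365,
  })
);
console.log(PoolARN);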
diff --git a/clients/client-storage-gateway/src/commands/CreateTapesCommand.ts b/clients/client-storage-gateway/src/commands/CreateTapesCommand.ts index edbecd4461c17..5a5fd86a7782b 100644 --- a/clients/client-storage-gateway/src/commands/CreateTapesCommand.ts +++ b/clients/client-storage-gateway/src/commands/CreateTapesCommand.ts @@ -38,7 +38,6 @@ export interface CreateTapesCommandOutput extends CreateTapesOutput, __MetadataB * @public *Creates one or more virtual tapes. You write data to the virtual tapes and then archive * the tapes. This operation is only supported in the tape gateway type.
- * *Cache storage must be allocated to the gateway before you can create virtual tapes. * Use the AddCache operation to add cache storage to a gateway.
diff --git a/clients/client-storage-gateway/src/commands/DeleteGatewayCommand.ts b/clients/client-storage-gateway/src/commands/DeleteGatewayCommand.ts index 560de77c23fe8..1ae407c5b6343 100644 --- a/clients/client-storage-gateway/src/commands/DeleteGatewayCommand.ts +++ b/clients/client-storage-gateway/src/commands/DeleteGatewayCommand.ts @@ -39,12 +39,10 @@ export interface DeleteGatewayCommandOutput extends DeleteGatewayOutput, __Metad *Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name * (ARN) of the gateway in your request. The operation deletes the gateway; however, it does * not delete the gateway virtual machine (VM) from your host computer.
- * *After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway * volumes are not deleted upon deleting the gateway, however, pending snapshots will not * complete. After you delete a gateway, your next step is to remove it from your * environment.
- * *You no longer pay software charges after the gateway is deleted; however, your * existing Amazon EBS snapshots persist and you will continue to be billed for these diff --git a/clients/client-storage-gateway/src/commands/DeleteSnapshotScheduleCommand.ts b/clients/client-storage-gateway/src/commands/DeleteSnapshotScheduleCommand.ts index 37f448ff505e2..0894d1244bd73 100644 --- a/clients/client-storage-gateway/src/commands/DeleteSnapshotScheduleCommand.ts +++ b/clients/client-storage-gateway/src/commands/DeleteSnapshotScheduleCommand.ts @@ -37,16 +37,13 @@ export interface DeleteSnapshotScheduleCommandOutput extends DeleteSnapshotSched /** * @public *
Deletes a snapshot of a volume.
- * *You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API
* action enables you to delete a snapshot schedule for a volume. For more information, see
* Backing up your
* volumes. In the DeleteSnapshotSchedule
request, you identify the
* volume by providing its Amazon Resource Name (ARN). This operation is only supported for
* cached volume gateway types.
To list or delete a snapshot, you must use the Amazon EC2 API. For more information, * go to DescribeSnapshots * in the Amazon Elastic Compute Cloud API Reference.
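A minimal sketch of the DeleteSnapshotSchedule request described above (the volume ARN is a placeholder; Node ESM top-level await):

import { StorageGatewayClient, DeleteSnapshotScheduleCommand } from "@aws-sdk/client-storage-gateway";

const sgw = new StorageGatewayClient({ region: "us-west-2" });
await sgw.send(
  new DeleteSnapshotScheduleCommand({
    VolumeARN: "arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB", // placeholder
  })
);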
diff --git a/clients/client-storage-gateway/src/commands/DeleteVolumeCommand.ts b/clients/client-storage-gateway/src/commands/DeleteVolumeCommand.ts index e0fc7fd675d04..665d749877d30 100644 --- a/clients/client-storage-gateway/src/commands/DeleteVolumeCommand.ts +++ b/clients/client-storage-gateway/src/commands/DeleteVolumeCommand.ts @@ -40,13 +40,11 @@ export interface DeleteVolumeCommandOutput extends DeleteVolumeOutput, __Metadat * This operation is only supported in the cached volume and stored volume types. For stored * volume gateways, the local disk that was configured as the storage volume is not deleted. * You can reuse the local disk to create another storage volume. - * *Before you delete a volume, make sure there are no iSCSI connections to the volume you * are deleting. You should also make sure there is no snapshot in progress. You can use the * Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are * deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API * Reference.
- * *In the request, you must provide the Amazon Resource Name (ARN) of the storage volume * you want to delete.
* @example diff --git a/clients/client-storage-gateway/src/commands/DescribeAvailabilityMonitorTestCommand.ts b/clients/client-storage-gateway/src/commands/DescribeAvailabilityMonitorTestCommand.ts index 9baa92ed1b696..f2ee447471eaa 100644 --- a/clients/client-storage-gateway/src/commands/DescribeAvailabilityMonitorTestCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeAvailabilityMonitorTestCommand.ts @@ -57,7 +57,7 @@ export interface DescribeAvailabilityMonitorTestCommandOutput * const response = await client.send(command); * // { // DescribeAvailabilityMonitorTestOutput * // GatewayARN: "STRING_VALUE", - * // Status: "STRING_VALUE", + * // Status: "COMPLETE" || "FAILED" || "PENDING", * // StartTime: new Date("TIMESTAMP"), * // }; * diff --git a/clients/client-storage-gateway/src/commands/DescribeBandwidthRateLimitCommand.ts b/clients/client-storage-gateway/src/commands/DescribeBandwidthRateLimitCommand.ts index baffc50425503..61143f0806fac 100644 --- a/clients/client-storage-gateway/src/commands/DescribeBandwidthRateLimitCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeBandwidthRateLimitCommand.ts @@ -40,7 +40,6 @@ export interface DescribeBandwidthRateLimitCommandOutput extends DescribeBandwid * which means no bandwidth rate limiting is in effect. This operation is supported only for * the stored volume, cached volume, and tape gateway types. To describe bandwidth rate limits * for S3 file gateways, use DescribeBandwidthRateLimitSchedule. - * *This operation returns a value for a bandwidth rate limit only if the limit is set. If * no limits are set for the gateway, then this operation returns only the gateway ARN in the * response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of diff --git a/clients/client-storage-gateway/src/commands/DescribeBandwidthRateLimitScheduleCommand.ts b/clients/client-storage-gateway/src/commands/DescribeBandwidthRateLimitScheduleCommand.ts index cbfcf8830c79b..130ad5f0c4c5b 100644 --- a/clients/client-storage-gateway/src/commands/DescribeBandwidthRateLimitScheduleCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeBandwidthRateLimitScheduleCommand.ts @@ -45,16 +45,13 @@ export interface DescribeBandwidthRateLimitScheduleCommandOutput * gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting * is in effect. This operation is supported only for volume, tape and S3 file gateways. FSx * file gateways do not support bandwidth rate limits.
- * *This operation returns information about a gateway's bandwidth rate limit schedule. A * bandwidth rate limit schedule consists of one or more bandwidth rate limit intervals. A * bandwidth rate limit interval defines a period of time on one or more days of the week, * during which bandwidth rate limits are specified for uploading, downloading, or both.
- * *A bandwidth rate limit interval consists of one or more days of the week, a start hour * and minute, an ending hour and minute, and bandwidth rate limits for uploading and * downloading.
- * *If no bandwidth rate limit schedule intervals are set for the gateway, this operation * returns an empty response. To specify which gateway to describe, use the Amazon Resource * Name (ARN) of the gateway in your request.
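To make the schedule shape concrete, here is a hedged sketch that reads the intervals back; the gateway ARN and region are placeholders, and the interval field names follow the BandwidthRateLimitInterval model that appears later in this patch:

import {
  StorageGatewayClient,
  DescribeBandwidthRateLimitScheduleCommand,
} from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

const { BandwidthRateLimitIntervals = [] } = await client.send(
  new DescribeBandwidthRateLimitScheduleCommand({
    GatewayARN: "arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B", // placeholder
  })
);

// An empty array means no bandwidth rate limit schedule intervals are set.
for (const i of BandwidthRateLimitIntervals) {
  console.log(
    `Days ${i.DaysOfWeek}: ${i.StartHourOfDay}:${i.StartMinuteOfHour} to ${i.EndHourOfDay}:${i.EndMinuteOfHour}`
  );
}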
diff --git a/clients/client-storage-gateway/src/commands/DescribeCacheCommand.ts b/clients/client-storage-gateway/src/commands/DescribeCacheCommand.ts index 193ed57562ecc..a3458df120348 100644 --- a/clients/client-storage-gateway/src/commands/DescribeCacheCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeCacheCommand.ts @@ -38,7 +38,6 @@ export interface DescribeCacheCommandOutput extends DescribeCacheOutput, __Metad * @public *Returns information about the cache of a gateway. This operation is only supported in * the cached volume, tape, and file gateway types.
- * *The response includes disk IDs that are configured as cache, and it includes the amount * of cache allocated and used.
* @example diff --git a/clients/client-storage-gateway/src/commands/DescribeCachediSCSIVolumesCommand.ts b/clients/client-storage-gateway/src/commands/DescribeCachediSCSIVolumesCommand.ts index 2f55b2d8224ab..abe91b4d60ad6 100644 --- a/clients/client-storage-gateway/src/commands/DescribeCachediSCSIVolumesCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeCachediSCSIVolumesCommand.ts @@ -38,7 +38,6 @@ export interface DescribeCachediSCSIVolumesCommandOutput extends DescribeCachedi * @public *Returns a description of the gateway volumes specified in the request. This operation is * only supported in the cached volume gateway types.
- * *The list of gateway volumes in the request must be from one gateway. In the response, * Storage Gateway returns volume information sorted by volume Amazon Resource Name * (ARN).
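A hedged usage sketch for this call; the region and volume ARN are placeholders:

import { StorageGatewayClient, DescribeCachediSCSIVolumesCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

// All ARNs in one request must belong to the same gateway.
const { CachediSCSIVolumes = [] } = await client.send(
  new DescribeCachediSCSIVolumesCommand({
    VolumeARNs: [
      "arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB", // placeholder
    ],
  })
);

// Results come back sorted by volume ARN.
for (const v of CachediSCSIVolumes) {
  console.log(v.VolumeARN, v.VolumeStatus, v.VolumeSizeInBytes);
}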
diff --git a/clients/client-storage-gateway/src/commands/DescribeGatewayInformationCommand.ts b/clients/client-storage-gateway/src/commands/DescribeGatewayInformationCommand.ts index aea63d0f5f58c..18087e5694f22 100644 --- a/clients/client-storage-gateway/src/commands/DescribeGatewayInformationCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeGatewayInformationCommand.ts @@ -76,13 +76,13 @@ export interface DescribeGatewayInformationCommandOutput extends DescribeGateway * // ], * // VPCEndpoint: "STRING_VALUE", * // CloudWatchLogGroupARN: "STRING_VALUE", - * // HostEnvironment: "STRING_VALUE", + * // HostEnvironment: "VMWARE" || "HYPER-V" || "EC2" || "KVM" || "OTHER" || "SNOWBALL", * // EndpointType: "STRING_VALUE", * // SoftwareUpdatesEndDate: "STRING_VALUE", * // DeprecationDate: "STRING_VALUE", - * // GatewayCapacity: "STRING_VALUE", + * // GatewayCapacity: "Small" || "Medium" || "Large", * // SupportedGatewayCapacities: [ // SupportedGatewayCapacities - * // "STRING_VALUE", + * // "Small" || "Medium" || "Large", * // ], * // HostEnvironmentId: "STRING_VALUE", * // }; diff --git a/clients/client-storage-gateway/src/commands/DescribeNFSFileSharesCommand.ts b/clients/client-storage-gateway/src/commands/DescribeNFSFileSharesCommand.ts index ce624876b9f86..4c0fd1f02d321 100644 --- a/clients/client-storage-gateway/src/commands/DescribeNFSFileSharesCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeNFSFileSharesCommand.ts @@ -70,7 +70,7 @@ export interface DescribeNFSFileSharesCommandOutput extends DescribeNFSFileShare * // Role: "STRING_VALUE", * // LocationARN: "STRING_VALUE", * // DefaultStorageClass: "STRING_VALUE", - * // ObjectACL: "STRING_VALUE", + * // ObjectACL: "private" || "public-read" || "public-read-write" || "authenticated-read" || "bucket-owner-read" || "bucket-owner-full-control" || "aws-exec-read", * // ClientList: [ // FileShareClientList * // "STRING_VALUE", * // ], diff --git a/clients/client-storage-gateway/src/commands/DescribeSMBFileSharesCommand.ts b/clients/client-storage-gateway/src/commands/DescribeSMBFileSharesCommand.ts index 8b1b0aab1322c..4cfbb39f0b6e8 100644 --- a/clients/client-storage-gateway/src/commands/DescribeSMBFileSharesCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeSMBFileSharesCommand.ts @@ -64,7 +64,7 @@ export interface DescribeSMBFileSharesCommandOutput extends DescribeSMBFileShare * // Role: "STRING_VALUE", * // LocationARN: "STRING_VALUE", * // DefaultStorageClass: "STRING_VALUE", - * // ObjectACL: "STRING_VALUE", + * // ObjectACL: "private" || "public-read" || "public-read-write" || "authenticated-read" || "bucket-owner-read" || "bucket-owner-full-control" || "aws-exec-read", * // ReadOnly: true || false, * // GuessMIMETypeEnabled: true || false, * // RequesterPays: true || false, @@ -81,7 +81,7 @@ export interface DescribeSMBFileSharesCommandOutput extends DescribeSMBFileShare * // ], * // AuditDestinationARN: "STRING_VALUE", * // Authentication: "STRING_VALUE", - * // CaseSensitivity: "STRING_VALUE", + * // CaseSensitivity: "ClientSpecified" || "CaseSensitive", * // Tags: [ // Tags * // { // Tag * // Key: "STRING_VALUE", // required diff --git a/clients/client-storage-gateway/src/commands/DescribeSMBSettingsCommand.ts b/clients/client-storage-gateway/src/commands/DescribeSMBSettingsCommand.ts index e4c14ee2a9403..45520c9d783bf 100644 --- a/clients/client-storage-gateway/src/commands/DescribeSMBSettingsCommand.ts +++ 
b/clients/client-storage-gateway/src/commands/DescribeSMBSettingsCommand.ts @@ -52,9 +52,9 @@ export interface DescribeSMBSettingsCommandOutput extends DescribeSMBSettingsOut * // { // DescribeSMBSettingsOutput * // GatewayARN: "STRING_VALUE", * // DomainName: "STRING_VALUE", - * // ActiveDirectoryStatus: "STRING_VALUE", + * // ActiveDirectoryStatus: "ACCESS_DENIED" || "DETACHED" || "JOINED" || "JOINING" || "NETWORK_ERROR" || "TIMEOUT" || "UNKNOWN_ERROR", * // SMBGuestPasswordSet: true || false, - * // SMBSecurityStrategy: "STRING_VALUE", + * // SMBSecurityStrategy: "ClientSpecified" || "MandatorySigning" || "MandatoryEncryption", * // FileSharesVisible: true || false, * // SMBLocalGroups: { // SMBLocalGroups * // GatewayAdmins: [ // UserList diff --git a/clients/client-storage-gateway/src/commands/DescribeTapeArchivesCommand.ts b/clients/client-storage-gateway/src/commands/DescribeTapeArchivesCommand.ts index 1c6505eba2e45..fa1dba7a659c8 100644 --- a/clients/client-storage-gateway/src/commands/DescribeTapeArchivesCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeTapeArchivesCommand.ts @@ -38,7 +38,6 @@ export interface DescribeTapeArchivesCommandOutput extends DescribeTapeArchivesO * @public *Returns a description of specified virtual tapes in the virtual tape shelf (VTS). This * operation is only supported in the tape gateway type.
- * *If a specific TapeARN
is not specified, Storage Gateway returns a
* description of all virtual tapes found in the VTS associated with your account.
Returns a list of virtual tape recovery points that are available for the specified tape * gateway.
- * *A recovery point is a point-in-time view of a virtual tape at which all the data on the * virtual tape is consistent. If your gateway crashes, virtual tapes that have recovery * points can be recovered to a new gateway. This operation is only supported in the tape diff --git a/clients/client-storage-gateway/src/commands/DescribeUploadBufferCommand.ts b/clients/client-storage-gateway/src/commands/DescribeUploadBufferCommand.ts index 22a3999eeff6b..8d7e5f25c2bb0 100644 --- a/clients/client-storage-gateway/src/commands/DescribeUploadBufferCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeUploadBufferCommand.ts @@ -38,7 +38,6 @@ export interface DescribeUploadBufferCommandOutput extends DescribeUploadBufferO * @public *
Returns information about the upload buffer of a gateway. This operation is supported * for the stored volume, cached volume, and tape gateway types.
- * *The response includes disk IDs that are configured as upload buffer space, and it * includes the amount of upload buffer space allocated and used.
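A small sketch of reading those disk IDs and byte counts back; the gateway ARN and region are placeholders:

import { StorageGatewayClient, DescribeUploadBufferCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

const { DiskIds = [], UploadBufferAllocatedInBytes, UploadBufferUsedInBytes } = await client.send(
  new DescribeUploadBufferCommand({
    GatewayARN: "arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B", // placeholder
  })
);
console.log(`Upload buffer disks: ${DiskIds.join(", ")}`);
console.log(`Used ${UploadBufferUsedInBytes} of ${UploadBufferAllocatedInBytes} bytes`);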
* @example diff --git a/clients/client-storage-gateway/src/commands/DescribeVTLDevicesCommand.ts b/clients/client-storage-gateway/src/commands/DescribeVTLDevicesCommand.ts index d26d2a462f7cd..bfe98d5f78955 100644 --- a/clients/client-storage-gateway/src/commands/DescribeVTLDevicesCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeVTLDevicesCommand.ts @@ -38,7 +38,6 @@ export interface DescribeVTLDevicesCommandOutput extends DescribeVTLDevicesOutpu * @public *Returns a description of virtual tape library (VTL) devices for the specified tape * gateway. In the response, Storage Gateway returns VTL device information.
- * *This operation is only supported in the tape gateway type.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-storage-gateway/src/commands/DescribeWorkingStorageCommand.ts b/clients/client-storage-gateway/src/commands/DescribeWorkingStorageCommand.ts index d363fa6028814..bb3e612849e24 100644 --- a/clients/client-storage-gateway/src/commands/DescribeWorkingStorageCommand.ts +++ b/clients/client-storage-gateway/src/commands/DescribeWorkingStorageCommand.ts @@ -39,12 +39,10 @@ export interface DescribeWorkingStorageCommandOutput extends DescribeWorkingStor *Returns information about the working storage of a gateway. This operation is only * supported in the stored volumes gateway type. This operation is deprecated in cached * volumes API version (20120630). Use DescribeUploadBuffer instead.
- * *Working storage is also referred to as upload buffer. You can also use the * DescribeUploadBuffer operation to add upload buffer to a stored volume gateway.
*The response includes disk IDs that are configured as working storage, and it includes * the amount of working storage allocated and used.
* @example diff --git a/clients/client-storage-gateway/src/commands/DisableGatewayCommand.ts b/clients/client-storage-gateway/src/commands/DisableGatewayCommand.ts index 40d73774c8d95..b7a78d754232d 100644 --- a/clients/client-storage-gateway/src/commands/DisableGatewayCommand.ts +++ b/clients/client-storage-gateway/src/commands/DisableGatewayCommand.ts @@ -38,10 +38,8 @@ export interface DisableGatewayCommandOutput extends DisableGatewayOutput, __Met * @public *Disables a tape gateway when the gateway is no longer functioning. For example, if your * gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.
- * *Use this operation for a tape gateway that is not reachable or not functioning. This * operation is only supported in the tape gateway type.
- * *After a gateway is disabled, it cannot be enabled.
*Lists the automatic tape creation policies for a gateway. If there are no automatic tape * creation policies for the gateway, it returns an empty list.
- * *This operation is only supported for tape gateways.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-storage-gateway/src/commands/ListFileSharesCommand.ts b/clients/client-storage-gateway/src/commands/ListFileSharesCommand.ts index 10abf2ec7d931..e2d4e92589522 100644 --- a/clients/client-storage-gateway/src/commands/ListFileSharesCommand.ts +++ b/clients/client-storage-gateway/src/commands/ListFileSharesCommand.ts @@ -57,7 +57,7 @@ export interface ListFileSharesCommandOutput extends ListFileSharesOutput, __Met * // NextMarker: "STRING_VALUE", * // FileShareInfoList: [ // FileShareInfoList * // { // FileShareInfo - * // FileShareType: "STRING_VALUE", + * // FileShareType: "NFS" || "SMB", * // FileShareARN: "STRING_VALUE", * // FileShareId: "STRING_VALUE", * // FileShareStatus: "STRING_VALUE", diff --git a/clients/client-storage-gateway/src/commands/ListGatewaysCommand.ts b/clients/client-storage-gateway/src/commands/ListGatewaysCommand.ts index a963689e8f8d8..1ef4148358fbc 100644 --- a/clients/client-storage-gateway/src/commands/ListGatewaysCommand.ts +++ b/clients/client-storage-gateway/src/commands/ListGatewaysCommand.ts @@ -39,11 +39,9 @@ export interface ListGatewaysCommandOutput extends ListGatewaysOutput, __Metadat *Lists gateways owned by an Amazon Web Services account in an Amazon Web Services Region * specified in the request. The returned list is ordered by gateway Amazon Resource Name * (ARN).
- * *By default, the operation returns a maximum of 100 gateways. This operation supports * pagination that allows you to optionally reduce the number of gateways returned in a * response.
- * *If you have more gateways than are returned in a response (that is, the response returns * only a truncated list of your gateways), the response contains a marker that you can * specify in your next request to fetch the next page of gateways.
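The marker-based pagination described above can be driven with a simple loop; a hedged sketch (the region is a placeholder, and the SDK's generated paginateListGateways helper could replace the manual loop):

import { StorageGatewayClient, ListGatewaysCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

let marker: string | undefined;
do {
  const page = await client.send(new ListGatewaysCommand({ Limit: 25, Marker: marker }));
  for (const gw of page.Gateways ?? []) {
    console.log(gw.GatewayARN, gw.HostEnvironment);
  }
  marker = page.Marker; // undefined once the final page has been returned
} while (marker);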
@@ -69,7 +67,7 @@ export interface ListGatewaysCommandOutput extends ListGatewaysOutput, __Metadat * // GatewayName: "STRING_VALUE", * // Ec2InstanceId: "STRING_VALUE", * // Ec2InstanceRegion: "STRING_VALUE", - * // HostEnvironment: "STRING_VALUE", + * // HostEnvironment: "VMWARE" || "HYPER-V" || "EC2" || "KVM" || "OTHER" || "SNOWBALL", * // HostEnvironmentId: "STRING_VALUE", * // }, * // ], diff --git a/clients/client-storage-gateway/src/commands/ListLocalDisksCommand.ts b/clients/client-storage-gateway/src/commands/ListLocalDisksCommand.ts index 8f8808ce25e65..8f81265bf8ac3 100644 --- a/clients/client-storage-gateway/src/commands/ListLocalDisksCommand.ts +++ b/clients/client-storage-gateway/src/commands/ListLocalDisksCommand.ts @@ -38,7 +38,6 @@ export interface ListLocalDisksCommandOutput extends ListLocalDisksOutput, __Met * @public *Returns a list of the gateway's local disks. To specify which gateway to describe, * you use the Amazon Resource Name (ARN) of the gateway in the body of the request.
- * *The request returns a list of all disks, specifying which are configured as working
* storage, cache storage, or stored volume or not configured at all. The response includes a
* DiskStatus
field. This field can have a value of present (the disk is
diff --git a/clients/client-storage-gateway/src/commands/ListTapePoolsCommand.ts b/clients/client-storage-gateway/src/commands/ListTapePoolsCommand.ts
index e012b26a9ade9..723932d961ed6 100644
--- a/clients/client-storage-gateway/src/commands/ListTapePoolsCommand.ts
+++ b/clients/client-storage-gateway/src/commands/ListTapePoolsCommand.ts
@@ -39,7 +39,6 @@ export interface ListTapePoolsCommandOutput extends ListTapePoolsOutput, __Metad
*
Lists custom tape pools. You specify custom tape pools to list by specifying one or more * custom tape pool Amazon Resource Names (ARNs). If you don't specify a custom tape pool ARN, * the operation lists all custom tape pools.
- * *This operation supports pagination. You can optionally specify the Limit
* parameter in the body to limit the number of tape pools in the response. If the number of
* tape pools returned in the response is truncated, the response includes a
@@ -65,10 +64,10 @@ export interface ListTapePoolsCommandOutput extends ListTapePoolsOutput, __Metad
* // { // PoolInfo
* // PoolARN: "STRING_VALUE",
* // PoolName: "STRING_VALUE",
- * // StorageClass: "STRING_VALUE",
- * // RetentionLockType: "STRING_VALUE",
+ * // StorageClass: "DEEP_ARCHIVE" || "GLACIER",
+ * // RetentionLockType: "COMPLIANCE" || "GOVERNANCE" || "NONE",
* // RetentionLockTimeInDays: Number("int"),
- * // PoolStatus: "STRING_VALUE",
+ * // PoolStatus: "ACTIVE" || "DELETED",
* // },
* // ],
* // Marker: "STRING_VALUE",
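Because the response enums shown above are now plain string unions backed by const objects (see the models_0.ts changes later in this patch), narrowing on them needs no casts; a small sketch with a placeholder region:

import { StorageGatewayClient, ListTapePoolsCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

// An empty input lists all custom tape pools.
const { PoolInfos = [] } = await client.send(new ListTapePoolsCommand({}));

const active = PoolInfos.filter((p) => p.PoolStatus === "ACTIVE");
console.log(`${active.length} active custom tape pool(s)`);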
diff --git a/clients/client-storage-gateway/src/commands/ListTapesCommand.ts b/clients/client-storage-gateway/src/commands/ListTapesCommand.ts
index f885d474a7d49..a4efb5d75a8cd 100644
--- a/clients/client-storage-gateway/src/commands/ListTapesCommand.ts
+++ b/clients/client-storage-gateway/src/commands/ListTapesCommand.ts
@@ -40,7 +40,6 @@ export interface ListTapesCommandOutput extends ListTapesOutput, __MetadataBeare
* (VTS). You specify the tapes to list by specifying one or more tape Amazon Resource Names
* (ARNs). If you don't specify a tape ARN, the operation lists all virtual tapes in both
* your VTL and VTS.
This operation supports pagination. By default, the operation returns a maximum of
* 100 tapes. You can optionally specify the Limit
parameter in the body to limit
* the number of tapes in the response. If the number of tapes returned in the response is
diff --git a/clients/client-storage-gateway/src/commands/ListVolumeRecoveryPointsCommand.ts b/clients/client-storage-gateway/src/commands/ListVolumeRecoveryPointsCommand.ts
index 313c0d2be1cf0..75355dd247a3f 100644
--- a/clients/client-storage-gateway/src/commands/ListVolumeRecoveryPointsCommand.ts
+++ b/clients/client-storage-gateway/src/commands/ListVolumeRecoveryPointsCommand.ts
@@ -38,7 +38,6 @@ export interface ListVolumeRecoveryPointsCommandOutput extends ListVolumeRecover
* @public
*
Lists the recovery points for a specified gateway. This operation is only supported in * the cached volume gateway type.
- * *Each cache volume has one recovery point. A volume recovery point is a point in time at * which all data of the volume is consistent and from which you can create a snapshot or * clone a new cached volume from a source volume. To create a snapshot from a volume recovery diff --git a/clients/client-storage-gateway/src/commands/ListVolumesCommand.ts b/clients/client-storage-gateway/src/commands/ListVolumesCommand.ts index db9b89d2f36af..3cacae167b2fb 100644 --- a/clients/client-storage-gateway/src/commands/ListVolumesCommand.ts +++ b/clients/client-storage-gateway/src/commands/ListVolumesCommand.ts @@ -39,7 +39,6 @@ export interface ListVolumesCommandOutput extends ListVolumesOutput, __MetadataB *
Lists the iSCSI stored volumes of a gateway. Results are sorted by volume ARN. The * response includes only the volume ARNs. If you want additional volume information, use the * DescribeStorediSCSIVolumes or the DescribeCachediSCSIVolumes API.
- * *The operation supports pagination. By default, the operation returns a maximum of
* 100 volumes. You can optionally specify the Limit
field in the body to limit
* the number of volumes in the response. If the number of volumes returned in the response is
diff --git a/clients/client-storage-gateway/src/commands/NotifyWhenUploadedCommand.ts b/clients/client-storage-gateway/src/commands/NotifyWhenUploadedCommand.ts
index ed9c5a91235d7..659e6b5d327ef 100644
--- a/clients/client-storage-gateway/src/commands/NotifyWhenUploadedCommand.ts
+++ b/clients/client-storage-gateway/src/commands/NotifyWhenUploadedCommand.ts
@@ -38,15 +38,11 @@ export interface NotifyWhenUploadedCommandOutput extends NotifyWhenUploadedOutpu
* @public
*
Sends you notification through CloudWatch Events when all files written to your file share * have been uploaded to Amazon S3.
- * *Storage Gateway can send a notification through Amazon CloudWatch Events when all * files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you * make a request for notification. When the upload is done, Storage Gateway sends you * notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to * send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways.
- * - * - * *For more information, see Getting file upload notification in the Storage Gateway User * Guide.
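A hedged sketch of requesting the upload notification; the share ARN and region are placeholders:

import { StorageGatewayClient, NotifyWhenUploadedCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

const { NotificationId } = await client.send(
  new NotifyWhenUploadedCommand({
    FileShareARN: "arn:aws:storagegateway:us-east-2:111122223333:share/share-12345678", // placeholder
  })
);
// NotificationId correlates this request with the CloudWatch event that fires
// once the files written so far have been uploaded to Amazon S3.
console.log("Watch for notification:", NotificationId);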
* @example diff --git a/clients/client-storage-gateway/src/commands/RefreshCacheCommand.ts b/clients/client-storage-gateway/src/commands/RefreshCacheCommand.ts index e26859f9c8a2b..09c2853f9b77f 100644 --- a/clients/client-storage-gateway/src/commands/RefreshCacheCommand.ts +++ b/clients/client-storage-gateway/src/commands/RefreshCacheCommand.ts @@ -42,23 +42,19 @@ export interface RefreshCacheCommandOutput extends RefreshCacheOutput, __Metadat * does not import files into the S3 File Gateway cache storage. It only updates the cached * inventory to reflect changes in the inventory of the objects in the S3 bucket. This * operation is only supported in the S3 File Gateway types. - * *You can subscribe to be notified through an Amazon CloudWatch event when your
* RefreshCache
operation completes. For more information, see Getting notified about file operations in the Storage Gateway
* User Guide. This operation is only supported for S3 File Gateways.
When this API is called, it only initiates the refresh operation. When the API call
* completes and returns a success code, it doesn't necessarily mean that the file
* refresh has completed. You should use the refresh-complete notification to determine that
* the operation has completed before you check for new files on the gateway file share. You
* can subscribe to be notified through a CloudWatch event when your RefreshCache
* operation completes.
Throttle limit: This API is asynchronous, so the gateway will accept no more than two * refreshes at any time. We recommend using the refresh-complete CloudWatch event * notification before issuing additional requests. For more information, see Getting notified about file operations in the Storage Gateway * User Guide.
- * *The S3 bucket name does not need to be included when entering the list of folders in * the FolderList parameter.
*For more information, see Getting notified about file operations in the Storage Gateway * User Guide.
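A sketch of a scoped refresh along the lines described above; the share ARN, folder paths, and region are placeholders:

import { StorageGatewayClient, RefreshCacheCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

const { NotificationId } = await client.send(
  new RefreshCacheCommand({
    FileShareARN: "arn:aws:storagegateway:us-east-2:111122223333:share/share-12345678", // placeholder
    FolderList: ["/reports", "/images"], // paths relative to the share root, no bucket name
    Recursive: true, // also refresh the contents of subfolders
  })
);
// Wait for the refresh-complete CloudWatch notification (matched by
// NotificationId) before checking the share for new files.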
* @example diff --git a/clients/client-storage-gateway/src/commands/ResetCacheCommand.ts b/clients/client-storage-gateway/src/commands/ResetCacheCommand.ts index b683044014d32..1ac861f3bea44 100644 --- a/clients/client-storage-gateway/src/commands/ResetCacheCommand.ts +++ b/clients/client-storage-gateway/src/commands/ResetCacheCommand.ts @@ -42,7 +42,6 @@ export interface ResetCacheCommandOutput extends ResetCacheOutput, __MetadataBea * can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the * gateway loses its cache storage. At this point, you can reconfigure the disks as cache * disks. This operation is only supported in the cached volume and tape types. - * *If the cache disk you are resetting contains data that has not been uploaded to * Amazon S3 yet, that data can be lost. After you reset cache disks, there will diff --git a/clients/client-storage-gateway/src/commands/RetrieveTapeArchiveCommand.ts b/clients/client-storage-gateway/src/commands/RetrieveTapeArchiveCommand.ts index fac09430c6f1d..d56a33ce91917 100644 --- a/clients/client-storage-gateway/src/commands/RetrieveTapeArchiveCommand.ts +++ b/clients/client-storage-gateway/src/commands/RetrieveTapeArchiveCommand.ts @@ -40,7 +40,6 @@ export interface RetrieveTapeArchiveCommandOutput extends RetrieveTapeArchiveOut * Virtual tapes archived in the VTS are not associated with any gateway. However after a tape * is retrieved, it is associated with a gateway, even though it is also listed in the VTS, * that is, archive. This operation is only supported in the tape gateway type.
- * *Once a tape is successfully retrieved to a gateway, it cannot be retrieved again to * another gateway. You must archive the tape again before you can retrieve it to another * gateway. This operation is only supported in the tape gateway type.
diff --git a/clients/client-storage-gateway/src/commands/RetrieveTapeRecoveryPointCommand.ts b/clients/client-storage-gateway/src/commands/RetrieveTapeRecoveryPointCommand.ts index e69f0930ede94..5cde312cce089 100644 --- a/clients/client-storage-gateway/src/commands/RetrieveTapeRecoveryPointCommand.ts +++ b/clients/client-storage-gateway/src/commands/RetrieveTapeRecoveryPointCommand.ts @@ -38,11 +38,9 @@ export interface RetrieveTapeRecoveryPointCommandOutput extends RetrieveTapeReco * @public *Retrieves the recovery point for the specified virtual tape. This operation is only * supported in the tape gateway type.
- * *A recovery point is a point-in-time view of a virtual tape at which all the data on the * tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be * recovered to a new gateway.
- * *The virtual tape can be retrieved to only one gateway. The retrieved tape is * read-only. The virtual tape can be retrieved to only a tape gateway. There is no charge diff --git a/clients/client-storage-gateway/src/commands/ShutdownGatewayCommand.ts b/clients/client-storage-gateway/src/commands/ShutdownGatewayCommand.ts index d1b5b18258c45..338f0aefba190 100644 --- a/clients/client-storage-gateway/src/commands/ShutdownGatewayCommand.ts +++ b/clients/client-storage-gateway/src/commands/ShutdownGatewayCommand.ts @@ -38,26 +38,21 @@ export interface ShutdownGatewayCommandOutput extends ShutdownGatewayOutput, __M * @public *
Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource * Name (ARN) of the gateway in the body of your request.
- * *The operation shuts down the gateway service component running in the gateway's * virtual machine (VM) and not the host VM.
- * *If you want to shut down the VM, it is recommended that you first shut down the * gateway component in the VM to avoid unpredictable conditions.
*After the gateway is shut down, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. * Your applications cannot read from or write to the gateway's storage volumes, and * there are no snapshots taken.
- * *When you make a shutdown request, you will get a 200 OK
success response
* immediately. However, it might take some time for the gateway to shut down. You can call
* the DescribeGatewayInformation API to check the status. For more
* information, see ActivateGateway.
If you do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the * gateway.
* @example diff --git a/clients/client-storage-gateway/src/commands/StartGatewayCommand.ts b/clients/client-storage-gateway/src/commands/StartGatewayCommand.ts index 981c95087dcac..4c8c0fc7333ce 100644 --- a/clients/client-storage-gateway/src/commands/StartGatewayCommand.ts +++ b/clients/client-storage-gateway/src/commands/StartGatewayCommand.ts @@ -40,13 +40,11 @@ export interface StartGatewayCommandOutput extends StartGatewayOutput, __Metadat * After the gateway starts, you can then make other API calls, your applications can read * from or write to the gateway's storage volumes and you will be able to take snapshot * backups. - * *When you make a request, you will get a 200 OK success response immediately. However, * it might take some time for the gateway to be ready. You should call DescribeGatewayInformation and check the status before making any * additional API calls. For more information, see ActivateGateway.
*To specify which gateway to start, use the Amazon Resource Name (ARN) of the gateway in * your request.
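A hedged stop-and-start sketch following the polling guidance above; the region and ARN are placeholders, and it assumes DescribeGatewayInformation exposes the lifecycle value as GatewayState:

import {
  DescribeGatewayInformationCommand,
  ShutdownGatewayCommand,
  StartGatewayCommand,
  StorageGatewayClient,
} from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region
const GatewayARN = "arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B"; // placeholder

await client.send(new ShutdownGatewayCommand({ GatewayARN })); // 200 OK returns immediately

let state: string | undefined;
do {
  await new Promise((r) => setTimeout(r, 30_000)); // pause between status polls
  ({ GatewayState: state } = await client.send(new DescribeGatewayInformationCommand({ GatewayARN })));
} while (state !== "SHUTDOWN"); // assumed lifecycle value, see note above

await client.send(new StartGatewayCommand({ GatewayARN }));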
* @example diff --git a/clients/client-storage-gateway/src/commands/UpdateAutomaticTapeCreationPolicyCommand.ts b/clients/client-storage-gateway/src/commands/UpdateAutomaticTapeCreationPolicyCommand.ts index 1c8e749b1ee07..5fccb520d317c 100644 --- a/clients/client-storage-gateway/src/commands/UpdateAutomaticTapeCreationPolicyCommand.ts +++ b/clients/client-storage-gateway/src/commands/UpdateAutomaticTapeCreationPolicyCommand.ts @@ -44,9 +44,7 @@ export interface UpdateAutomaticTapeCreationPolicyCommandOutput *Updates the automatic tape creation policy of a gateway. Use this to update the policy * with a new set of automatic tape creation rules. This is only supported for tape * gateways.
- * *By default, there is no automatic tape creation policy.
- * *A gateway can have only one automatic tape creation policy.
*By default, a gateway's bandwidth rate limits are not set. If you don't set * any limit, the gateway does not have any limitations on its bandwidth usage and could * potentially use the maximum available bandwidth.
- * *To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in * your request.
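A minimal sketch of setting both limits; values are in bits per second, either limit may be set on its own, and all inputs are placeholders:

import { StorageGatewayClient, UpdateBandwidthRateLimitCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

await client.send(
  new UpdateBandwidthRateLimitCommand({
    GatewayARN: "arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B", // placeholder
    AverageUploadRateLimitInBitsPerSec: 51_200_000, // roughly 51 Mbps
    AverageDownloadRateLimitInBitsPerSec: 102_400_000, // roughly 102 Mbps
  })
);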
* @example diff --git a/clients/client-storage-gateway/src/commands/UpdateChapCredentialsCommand.ts b/clients/client-storage-gateway/src/commands/UpdateChapCredentialsCommand.ts index 1799409aa28f3..7a8411dd6d486 100644 --- a/clients/client-storage-gateway/src/commands/UpdateChapCredentialsCommand.ts +++ b/clients/client-storage-gateway/src/commands/UpdateChapCredentialsCommand.ts @@ -44,7 +44,6 @@ export interface UpdateChapCredentialsCommandOutput extends UpdateChapCredential * specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for * added security, you might use it. This operation is supported in the volume and tape * gateway types. - * *When you update CHAP credentials, all existing connections on the target are closed * and initiators must reconnect with the new credentials.
diff --git a/clients/client-storage-gateway/src/commands/UpdateGatewayInformationCommand.ts b/clients/client-storage-gateway/src/commands/UpdateGatewayInformationCommand.ts index b07d6e0cb9610..b48560de83ca2 100644 --- a/clients/client-storage-gateway/src/commands/UpdateGatewayInformationCommand.ts +++ b/clients/client-storage-gateway/src/commands/UpdateGatewayInformationCommand.ts @@ -39,7 +39,6 @@ export interface UpdateGatewayInformationCommandOutput extends UpdateGatewayInfo *Updates a gateway's metadata, which includes the gateway's name and time zone. * To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in * your request.
- * *For gateways activated after September 2, 2015, the gateway's ARN contains the * gateway ID rather than the gateway name. However, changing the name of the gateway has @@ -56,7 +55,7 @@ export interface UpdateGatewayInformationCommandOutput extends UpdateGatewayInfo * GatewayName: "STRING_VALUE", * GatewayTimezone: "STRING_VALUE", * CloudWatchLogGroupARN: "STRING_VALUE", - * GatewayCapacity: "STRING_VALUE", + * GatewayCapacity: "Small" || "Medium" || "Large", * }; * const command = new UpdateGatewayInformationCommand(input); * const response = await client.send(command); diff --git a/clients/client-storage-gateway/src/commands/UpdateGatewaySoftwareNowCommand.ts b/clients/client-storage-gateway/src/commands/UpdateGatewaySoftwareNowCommand.ts index e6e2b68ab071c..9c9e28f1c5f52 100644 --- a/clients/client-storage-gateway/src/commands/UpdateGatewaySoftwareNowCommand.ts +++ b/clients/client-storage-gateway/src/commands/UpdateGatewaySoftwareNowCommand.ts @@ -38,14 +38,12 @@ export interface UpdateGatewaySoftwareNowCommandOutput extends UpdateGatewaySoft * @public *
Updates the gateway virtual machine (VM) software. The request immediately triggers the * software update.
- * *When you make this request, you get a 200 OK
success response
* immediately. However, it might take some time for the update to complete. You can call
* DescribeGatewayInformation to verify the gateway is in the
* STATE_RUNNING
state.
A software update forces a system restart of your gateway. You can minimize the * chance of any disruption to your applications by increasing your iSCSI Initiators' diff --git a/clients/client-storage-gateway/src/commands/UpdateNFSFileShareCommand.ts b/clients/client-storage-gateway/src/commands/UpdateNFSFileShareCommand.ts index a03d8a7f31c15..573026d371dde 100644 --- a/clients/client-storage-gateway/src/commands/UpdateNFSFileShareCommand.ts +++ b/clients/client-storage-gateway/src/commands/UpdateNFSFileShareCommand.ts @@ -38,14 +38,11 @@ export interface UpdateNFSFileShareCommandOutput extends UpdateNFSFileShareOutpu * @public *
Updates a Network File System (NFS) file share. This operation is only supported in S3 * File Gateways.
- * *To leave a file share field unchanged, set the corresponding input field to * null.
*Updates the following file share settings:
- * *Default storage class for your S3 bucket
@@ -80,7 +77,7 @@ export interface UpdateNFSFileShareCommandOutput extends UpdateNFSFileShareOutpu * OwnerId: Number("long"), * }, * DefaultStorageClass: "STRING_VALUE", - * ObjectACL: "STRING_VALUE", + * ObjectACL: "private" || "public-read" || "public-read-write" || "authenticated-read" || "bucket-owner-read" || "bucket-owner-full-control" || "aws-exec-read", * ClientList: [ // FileShareClientList * "STRING_VALUE", * ], diff --git a/clients/client-storage-gateway/src/commands/UpdateSMBFileShareCommand.ts b/clients/client-storage-gateway/src/commands/UpdateSMBFileShareCommand.ts index 74a48fb5ae335..40f2d54e67cbc 100644 --- a/clients/client-storage-gateway/src/commands/UpdateSMBFileShareCommand.ts +++ b/clients/client-storage-gateway/src/commands/UpdateSMBFileShareCommand.ts @@ -38,12 +38,10 @@ export interface UpdateSMBFileShareCommandOutput extends UpdateSMBFileShareOutpu * @public *Updates a Server Message Block (SMB) file share. This operation is only supported for S3 * File Gateways.
- * *To leave a file share field unchanged, set the corresponding input field to * null.
*File gateways require Security Token Service (Amazon Web Services STS) to be * activated to enable you to create a file share. Make sure that Amazon Web Services STS @@ -52,7 +50,6 @@ export interface UpdateSMBFileShareCommandOutput extends UpdateSMBFileShareOutpu * it. For information about how to activate Amazon Web Services STS, see Activating and * deactivating Amazon Web Services STS in an Amazon Web Services Region in the * Identity and Access Management User Guide.
- * *File gateways don't support creating hard or symbolic links on a file * share.
*Updates the SMB security strategy on a file gateway. This action is only supported in * file gateways.
- * *This API is called Security level in the User Guide.
- * *A higher security level can affect performance of the gateway.
*Updates a snapshot schedule configured for a gateway volume. This operation is only * supported in the cached volume and stored volume gateway types.
- * *The default snapshot schedule for volume is once every 24 hours, starting at the * creation time of the volume. You can use this API to change the snapshot schedule * configured for the volume.
- * *In the request you must identify the gateway volume whose snapshot schedule you want to * update, and the schedule information, including when you want the snapshot to begin on a * day and the frequency (in hours) of snapshots.
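A sketch of the request described above; the volume ARN and region are placeholders:

import { StorageGatewayClient, UpdateSnapshotScheduleCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-2" }); // placeholder region

// Snapshot every 12 hours, starting at 03:00 in the gateway's time zone.
await client.send(
  new UpdateSnapshotScheduleCommand({
    VolumeARN: "arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB", // placeholder
    StartAt: 3, // hour of day, 0-23
    RecurrenceInHours: 12,
    Description: "Twice-daily snapshot", // optional
  })
);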
diff --git a/clients/client-storage-gateway/src/endpoint/ruleset.ts b/clients/client-storage-gateway/src/endpoint/ruleset.ts index 420220818ce46..8605ca0f42912 100644 --- a/clients/client-storage-gateway/src/endpoint/ruleset.ts +++ b/clients/client-storage-gateway/src/endpoint/ruleset.ts @@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types"; or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/storage-gateway.json */ -const p="required", -q="fn", -r="argv", -s="ref"; -const a="PartitionResult", +const q="required", +r="fn", +s="argv", +t="ref"; +const a="isSet", b="tree", c="error", d="endpoint", -e={[p]:false,"type":"String"}, -f={[p]:true,"default":false,"type":"Boolean"}, -g={[s]:"Endpoint"}, -h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]}, -i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]}, -j={}, -k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]}, -l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]}, -m=[g], -n=[h], -o=[i]; -const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://storagegateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://storagegateway-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://storagegateway.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://storagegateway.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]}; +e="PartitionResult", +f={[q]:false,"type":"String"}, +g={[q]:true,"default":false,"type":"Boolean"}, +h={[t]:"Endpoint"}, +i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]}, +j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]}, +k={}, +l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]}, +m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]}, +n=[i], +o=[j], +p=[{[t]:"Region"}]; +const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not 
supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://storagegateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://storagegateway-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://storagegateway.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://storagegateway.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}; export const ruleSet: RuleSetObject = _data; diff --git a/clients/client-storage-gateway/src/index.ts b/clients/client-storage-gateway/src/index.ts index 22af591c9f4fb..faf334fc5f65b 100644 --- a/clients/client-storage-gateway/src/index.ts +++ b/clients/client-storage-gateway/src/index.ts @@ -2,15 +2,12 @@ /* eslint-disable */ /** *Storage Gateway is the service that connects an on-premises software appliance * with cloud-based storage to provide seamless and secure integration between an * organization's on-premises IT environment and the Amazon Web Services storage * infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery.
- * *Use the following links to get started using the Storage Gateway * Service API Reference:
- * *@@ -38,7 +35,6 @@ * and the endpoints available for use with Storage Gateway.
*Storage Gateway resource IDs are in uppercase. When you use these resource IDs
* with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change
@@ -47,7 +43,6 @@
* this ID with the EC2 API, you must change it to vol-aa22bb012345daf670
.
* Otherwise, the EC2 API might not behave as expected.
IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway * volumes are changing to a longer format. Starting in December 2016, all new volumes and @@ -55,16 +50,12 @@ * be able to use these longer IDs so you can test your systems with the new format. For * more information, see Longer EC2 and * EBS resource IDs.
- * *For example, a volume Amazon Resource Name (ARN) with the longer volume ID format * looks like the following:
- * *
* arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG
.
A snapshot ID with the longer ID format looks like the following:
* snap-78e226633445566ee
.
For more information, see Announcement: * Heads-up – Longer Storage Gateway volume and snapshot IDs coming in * 2016.
diff --git a/clients/client-storage-gateway/src/models/models_0.ts b/clients/client-storage-gateway/src/models/models_0.ts index 26cd95da1efcc..a59a67ea4bd07 100644 --- a/clients/client-storage-gateway/src/models/models_0.ts +++ b/clients/client-storage-gateway/src/models/models_0.ts @@ -26,7 +26,6 @@ export interface Tag { /** * @public *A JSON object containing one or more of the following fields:
- * *
@@ -75,9 +74,6 @@ export interface ActivateGatewayInput {
* parameters, however, these are merely defaults -- the arguments you pass to the
* ActivateGateway
API call determine the actual configuration of your
* gateway.
For more information, see Getting activation * key in the Storage Gateway User Guide.
*/ @@ -107,7 +103,6 @@ export interface ActivateGatewayInput { * information about available Amazon Web Services Regions and endpoints for Storage Gateway, see * Storage Gateway endpoints and quotas in the Amazon Web Services * General Reference. - * *Valid Values: See * Storage Gateway endpoints and quotas in the Amazon Web Services * General Reference. @@ -121,7 +116,6 @@ export interface ActivateGatewayInput { *
A value that defines the type of gateway to activate. The type specified is critical to
* all later functions of the gateway and cannot be changed after activation. The default
* value is CACHED
.
Valid Values: STORED
| CACHED
| VTL
|
* VTL_SNOW
| FILE_S3
| FILE_FSX_SMB
*
The value that indicates the type of tape drive to use for a tape gateway. This field is * optional.
- * *Valid Values: IBM-ULT3580-TD5
*
The value that indicates the type of medium changer to use for a tape gateway. This field * is optional.
- * *Valid Values: STK-L700
| AWS-Gateway-VTL
|
* IBM-03584L32-0402
*
A list of up to 50 tags that you can assign to the gateway. Each tag is a key-value * pair.
- * *Valid characters for key and value are letters, spaces, and numbers that can be * represented in UTF-8 format, and the following special characters: + - = . _ : / @. The @@ -169,7 +160,6 @@ export interface ActivateGatewayInput { *
Storage Gateway returns the Amazon Resource Name (ARN) of the activated gateway. It * is a string made of information such as your account, gateway name, and Amazon Web Services Region. This ARN is used to reference the gateway in other API operations as * well as resource-based authorization.
- * *For gateways activated prior to September 02, 2015, the gateway ARN contains the * gateway name rather than the gateway ID. Changing the name of the gateway has no effect @@ -187,70 +177,77 @@ export interface ActivateGatewayOutput { /** * @public + * @enum */ -export type ErrorCode = - | "ActivationKeyExpired" - | "ActivationKeyInvalid" - | "ActivationKeyNotFound" - | "AuthenticationFailure" - | "BandwidthThrottleScheduleNotFound" - | "Blocked" - | "CannotExportSnapshot" - | "ChapCredentialNotFound" - | "DiskAlreadyAllocated" - | "DiskDoesNotExist" - | "DiskSizeGreaterThanVolumeMaxSize" - | "DiskSizeLessThanVolumeSize" - | "DiskSizeNotGigAligned" - | "DuplicateCertificateInfo" - | "DuplicateSchedule" - | "EndpointNotFound" - | "GatewayInternalError" - | "GatewayNotConnected" - | "GatewayNotFound" - | "GatewayProxyNetworkConnectionBusy" - | "IAMNotSupported" - | "InitiatorInvalid" - | "InitiatorNotFound" - | "InternalError" - | "InvalidEndpoint" - | "InvalidGateway" - | "InvalidParameters" - | "InvalidSchedule" - | "JoinDomainInProgress" - | "LocalStorageLimitExceeded" - | "LunAlreadyAllocated " - | "LunInvalid" - | "MaximumContentLengthExceeded" - | "MaximumTapeCartridgeCountExceeded" - | "MaximumVolumeCountExceeded" - | "NetworkConfigurationChanged" - | "NoDisksAvailable" - | "NotImplemented" - | "NotSupported" - | "OperationAborted" - | "OutdatedGateway" - | "ParametersNotImplemented" - | "RegionInvalid" - | "RequestTimeout" - | "ServiceUnavailable" - | "SnapshotDeleted" - | "SnapshotIdInvalid" - | "SnapshotInProgress" - | "SnapshotNotFound" - | "SnapshotScheduleNotFound" - | "StagingAreaFull" - | "StorageFailure" - | "TapeCartridgeNotFound" - | "TargetAlreadyExists" - | "TargetInvalid" - | "TargetNotFound" - | "UnauthorizedOperation" - | "VolumeAlreadyExists" - | "VolumeIdInvalid" - | "VolumeInUse" - | "VolumeNotFound" - | "VolumeNotReady"; +export const ErrorCode = { + ActivationKeyExpired: "ActivationKeyExpired", + ActivationKeyInvalid: "ActivationKeyInvalid", + ActivationKeyNotFound: "ActivationKeyNotFound", + AuthenticationFailure: "AuthenticationFailure", + BandwidthThrottleScheduleNotFound: "BandwidthThrottleScheduleNotFound", + Blocked: "Blocked", + CannotExportSnapshot: "CannotExportSnapshot", + ChapCredentialNotFound: "ChapCredentialNotFound", + DiskAlreadyAllocated: "DiskAlreadyAllocated", + DiskDoesNotExist: "DiskDoesNotExist", + DiskSizeGreaterThanVolumeMaxSize: "DiskSizeGreaterThanVolumeMaxSize", + DiskSizeLessThanVolumeSize: "DiskSizeLessThanVolumeSize", + DiskSizeNotGigAligned: "DiskSizeNotGigAligned", + DuplicateCertificateInfo: "DuplicateCertificateInfo", + DuplicateSchedule: "DuplicateSchedule", + EndpointNotFound: "EndpointNotFound", + GatewayInternalError: "GatewayInternalError", + GatewayNotConnected: "GatewayNotConnected", + GatewayNotFound: "GatewayNotFound", + GatewayProxyNetworkConnectionBusy: "GatewayProxyNetworkConnectionBusy", + IAMNotSupported: "IAMNotSupported", + InitiatorInvalid: "InitiatorInvalid", + InitiatorNotFound: "InitiatorNotFound", + InternalError: "InternalError", + InvalidEndpoint: "InvalidEndpoint", + InvalidGateway: "InvalidGateway", + InvalidParameters: "InvalidParameters", + InvalidSchedule: "InvalidSchedule", + JoinDomainInProgress: "JoinDomainInProgress", + LocalStorageLimitExceeded: "LocalStorageLimitExceeded", + LunAlreadyAllocated_: "LunAlreadyAllocated ", + LunInvalid: "LunInvalid", + MaximumContentLengthExceeded: "MaximumContentLengthExceeded", + MaximumTapeCartridgeCountExceeded: "MaximumTapeCartridgeCountExceeded", + 
MaximumVolumeCountExceeded: "MaximumVolumeCountExceeded", + NetworkConfigurationChanged: "NetworkConfigurationChanged", + NoDisksAvailable: "NoDisksAvailable", + NotImplemented: "NotImplemented", + NotSupported: "NotSupported", + OperationAborted: "OperationAborted", + OutdatedGateway: "OutdatedGateway", + ParametersNotImplemented: "ParametersNotImplemented", + RegionInvalid: "RegionInvalid", + RequestTimeout: "RequestTimeout", + ServiceUnavailable: "ServiceUnavailable", + SnapshotDeleted: "SnapshotDeleted", + SnapshotIdInvalid: "SnapshotIdInvalid", + SnapshotInProgress: "SnapshotInProgress", + SnapshotNotFound: "SnapshotNotFound", + SnapshotScheduleNotFound: "SnapshotScheduleNotFound", + StagingAreaFull: "StagingAreaFull", + StorageFailure: "StorageFailure", + TapeCartridgeNotFound: "TapeCartridgeNotFound", + TargetAlreadyExists: "TargetAlreadyExists", + TargetInvalid: "TargetInvalid", + TargetNotFound: "TargetNotFound", + UnauthorizedOperation: "UnauthorizedOperation", + VolumeAlreadyExists: "VolumeAlreadyExists", + VolumeIdInvalid: "VolumeIdInvalid", + VolumeInUse: "VolumeInUse", + VolumeNotFound: "VolumeNotFound", + VolumeNotReady: "VolumeNotReady", +} as const; + +/** + * @public + */ +export type ErrorCode = (typeof ErrorCode)[keyof typeof ErrorCode]; /** * @public @@ -330,17 +327,24 @@ export class InvalidGatewayRequestException extends __BaseException { } } +/** + * @public + * @enum + */ +export const ActiveDirectoryStatus = { + ACCESS_DENIED: "ACCESS_DENIED", + DETACHED: "DETACHED", + JOINED: "JOINED", + JOINING: "JOINING", + NETWORK_ERROR: "NETWORK_ERROR", + TIMEOUT: "TIMEOUT", + UNKNOWN_ERROR: "UNKNOWN_ERROR", +} as const; + /** * @public */ -export type ActiveDirectoryStatus = - | "ACCESS_DENIED" - | "DETACHED" - | "JOINED" - | "JOINING" - | "NETWORK_ERROR" - | "TIMEOUT" - | "UNKNOWN_ERROR"; +export type ActiveDirectoryStatus = (typeof ActiveDirectoryStatus)[keyof typeof ActiveDirectoryStatus]; /** * @public @@ -389,7 +393,6 @@ export interface AddTagsToResourceInput { * @public *
The key-value pair that represents the tag you want to add to the resource. The value * can be an empty string.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -447,7 +450,6 @@ export interface AddUploadBufferOutput { /** * @public *
A JSON object containing one or more of the following fields:
- * *
@@ -515,7 +517,6 @@ export interface AssignTapePoolInput {
* BypassGovernanceRetention
, setting this to TRUE enables the user to bypass
* the retention lock. This parameter is set to true by default for calls from the
* console.
Valid values: TRUE
| FALSE
*
Valid Values: 0, 300 to 2,592,000 seconds (5 minutes to 30 days)
*/ CacheStaleTimeoutInSeconds?: number; @@ -668,7 +668,6 @@ export interface AttachVolumeInput { * myvolume results in the target ARN of *arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume
.
* The target name must be unique across all volumes on a gateway.
- *
* If you don't specify a value, Storage Gateway uses the value that was previously * used for this volume as the new target name.
*/ @@ -685,7 +684,6 @@ export interface AttachVolumeInput { *The network interface of the gateway on which to expose the iSCSI target. Only IPv4 * addresses are accepted. Use DescribeGatewayInformation to get a list of * the network interfaces available on a gateway.
- * *Valid Values: A valid IP address.
*/ NetworkInterfaceId: string | undefined; @@ -729,7 +727,6 @@ export interface AutomaticTapeCreationRule { * @public *A prefix that you append to the barcode of the virtual tape that you are creating. This * prefix makes the barcode unique.
- * *The prefix must be 1-4 characters in length and must be one of the uppercase letters * from A to Z.
@@ -793,10 +790,21 @@ export interface AutomaticTapeCreationPolicyInfo { GatewayARN?: string; } +/** + * @public + * @enum + */ +export const AvailabilityMonitorTestStatus = { + COMPLETE: "COMPLETE", + FAILED: "FAILED", + PENDING: "PENDING", +} as const; + /** * @public */ -export type AvailabilityMonitorTestStatus = "COMPLETE" | "FAILED" | "PENDING"; +export type AvailabilityMonitorTestStatus = + (typeof AvailabilityMonitorTestStatus)[keyof typeof AvailabilityMonitorTestStatus]; /** * @public @@ -829,7 +837,6 @@ export interface BandwidthRateLimitInterval { /** * @public *The minute of the hour to end the bandwidth rate limit interval.
- * * The bandwidth rate limit interval ends at the end of the minute. To end an interval
* at the end of an hour, use the value 59
.
VolumeUsedInBytes
is different from the compressed size of the
* volume, which is the value that is used to calculate your bill.
- *
* This value is not available for volumes created prior to May 13, 2015, until you * store data on the volume.
- * *If you use a delete tool that overwrites the data on your volume with random data,
* your usage will not be reduced. This is because the random data is not compressible. If
* you want to reduce the amount of billed storage on your volume, we recommend overwriting
@@ -1004,7 +1009,6 @@ export interface CachediSCSIVolume {
* myvolume results in the target ARN of
* arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume
.
* The target name must be unique across all volumes on a gateway.
If you don't specify a value, Storage Gateway uses the value that was previously * used for this volume as the new target name.
*/ @@ -1079,8 +1083,17 @@ export interface CancelRetrievalOutput { /** * @public + * @enum */ -export type CaseSensitivity = "CaseSensitive" | "ClientSpecified"; +export const CaseSensitivity = { + CaseSensitive: "CaseSensitive", + ClientSpecified: "ClientSpecified", +} as const; + +/** + * @public + */ +export type CaseSensitivity = (typeof CaseSensitivity)[keyof typeof CaseSensitivity]; /** * @public @@ -1091,7 +1104,6 @@ export interface ChapInfo { /** * @public *The Amazon Resource Name (ARN) of the volume.
- * *Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).
*/ TargetARN?: string; @@ -1150,7 +1162,6 @@ export interface CreateCachediSCSIVolumeInput { * myvolume results in the target ARN of *arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume
.
* The target name must be unique across all volumes on a gateway.
- *
* If you don't specify a value, Storage Gateway uses the value that was previously * used for this volume as the new target name.
*/ @@ -1170,7 +1181,6 @@ export interface CreateCachediSCSIVolumeInput { *The network interface of the gateway on which to expose the iSCSI target. Only IPv4 * addresses are accepted. Use DescribeGatewayInformation to get a list of * the network interfaces available on a gateway.
- * *Valid Values: A valid IP address.
*/ NetworkInterfaceId: string | undefined; @@ -1187,7 +1197,6 @@ export interface CreateCachediSCSIVolumeInput { *Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
A list of up to 50 tags that you can assign to a cached volume. Each tag is a key-value * pair.
- * *Valid characters for key and value are letters, spaces, and numbers that you can * represent in UTF-8 format, and the following special characters: + - = . _ : / @. The @@ -1274,15 +1282,22 @@ export interface NFSFileShareDefaults { /** * @public + * @enum */ -export type ObjectACL = - | "authenticated-read" - | "aws-exec-read" - | "bucket-owner-full-control" - | "bucket-owner-read" - | "private" - | "public-read" - | "public-read-write"; +export const ObjectACL = { + authenticated_read: "authenticated-read", + aws_exec_read: "aws-exec-read", + bucket_owner_full_control: "bucket-owner-full-control", + bucket_owner_read: "bucket-owner-read", + private: "private", + public_read: "public-read", + public_read_write: "public-read-write", +} as const; + +/** + * @public + */ +export type ObjectACL = (typeof ObjectACL)[keyof typeof ObjectACL]; /** * @public @@ -1314,7 +1329,6 @@ export interface CreateNFSFileShareInput { *
Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
You can specify LocationARN as a bucket ARN, access point ARN or access point alias, * as shown in the following examples.
- * *Bucket ARN:
*
* arn:aws:s3:::my-bucket/prefix/
*
Access point ARN:
*
* arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/
*
If you specify an access point, the bucket policy must be configured to delegate * access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.
- * *Access point alias:
*
* test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias
@@ -1368,7 +1378,6 @@ export interface CreateNFSFileShareInput {
* @public
*
The default storage class for objects put into an Amazon S3 bucket by the S3
* File Gateway. The default value is S3_STANDARD
. Optional.
Valid Values: S3_STANDARD
| S3_INTELLIGENT_TIERING
|
* S3_STANDARD_IA
| S3_ONEZONE_IA
*
A value that maps a user to anonymous user.
- * *Valid values are the following:
- * *@@ -1416,7 +1423,6 @@ export interface CreateNFSFileShareInput { * @public *
A value that sets the write status of a file share. Set this value to true
* to set the write status to read-only, otherwise set to false
.
Valid Values: true
| false
*
A value that enables guessing of the MIME type for uploaded objects based on file
* extensions. Set this value to true
to enable MIME type guessing, otherwise set
* to false
. The default value is true
.
Valid Values: true
| false
*
true
, the requester pays
* the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays
* the cost of storing data.
- *
*
* RequesterPays
is a configuration for the S3 bucket that backs the file
* share, so make sure that the configuration on the file share is the same as the S3
* bucket configuration.
Valid Values: true
| false
*
A list of up to 50 tags that can be assigned to the NFS file share. Each tag is a * key-value pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -1469,7 +1471,6 @@ export interface CreateNFSFileShareInput { /** * @public *
The name of the file share. Optional.
- * *
* FileShareName
must be set if an S3 prefix name is set in
@@ -1491,22 +1492,17 @@ export interface CreateNFSFileShareInput {
* generating an ObjectUploaded
notification. Because clients can make many small
* writes to files, it's best to set this parameter for as long as possible to avoid
* generating multiple notifications for the same file in a small time period.
* SettlingTimeInSeconds
has no effect on the timing of the object
* uploading to Amazon S3, only the timing of the notification.
The following example sets NotificationPolicy
on with
* SettlingTimeInSeconds
set to 60.
* \{\"Upload\": \{\"SettlingTimeInSeconds\": 60\}\}
*
The following example sets NotificationPolicy
off.
* \{\}
*
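A sketch of how the two NotificationPolicy examples above are passed on the operation this doc block belongs to; the gateway, role, and bucket ARNs are placeholders:

```ts
import {
  StorageGatewayClient,
  CreateNFSFileShareCommand,
} from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-1" });

await client.send(
  new CreateNFSFileShareCommand({
    ClientToken: "create-share-0001",
    GatewayARN: "arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B",
    Role: "arn:aws:iam::111122223333:role/StorageGatewayBucketAccess",
    LocationARN: "arn:aws:s3:::my-bucket/prefix/",
    // Wait 60 seconds after the last write before emitting ObjectUploaded:
    NotificationPolicy: '{"Upload": {"SettlingTimeInSeconds": 60}}',
    // To turn notifications off instead, pass: NotificationPolicy: "{}"
  })
);
```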
Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
You can specify LocationARN as a bucket ARN, access point ARN or access point alias, * as shown in the following examples.
- * *Bucket ARN:
*
* arn:aws:s3:::my-bucket/prefix/
*
Access point ARN:
*
* arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/
*
If you specify an access point, the bucket policy must be configured to delegate * access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.
- * *Access point alias:
*
* test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias
@@ -1632,7 +1623,6 @@ export interface CreateSMBFileShareInput {
* @public
*
The default storage class for objects put into an Amazon S3 bucket by the S3
* File Gateway. The default value is S3_STANDARD
. Optional.
Valid Values: S3_STANDARD
| S3_INTELLIGENT_TIERING
|
* S3_STANDARD_IA
| S3_ONEZONE_IA
*
A value that sets the write status of a file share. Set this value to true
* to set the write status to read-only, otherwise set to false
.
Valid Values: true
| false
*
A value that enables guessing of the MIME type for uploaded objects based on file
* extensions. Set this value to true
to enable MIME type guessing, otherwise set
* to false
. The default value is true
.
Valid Values: true
| false
*
true
, the requester pays
* the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays
* the cost of storing data.
- *
*
* RequesterPays
is a configuration for the S3 bucket that backs the file
* share, so make sure that the configuration on the file share is the same as the S3
* bucket configuration.
Valid Values: true
| false
*
Set this value to true
to enable access control list (ACL) on the SMB file
* share. Set it to false
to map file and directory permissions to the POSIX
* permissions.
For more information, see Using Microsoft Windows ACLs to * control access to an SMB file share in the Storage Gateway User * Guide.
- * *Valid Values: true
| false
*
DOMAIN\User1
, user1
,
* @group1
, and @DOMAIN\group1
.
- *
* Use this option very carefully, because any user in this list can do anything they * like on the file share, regardless of file permissions.
@@ -1754,7 +1735,6 @@ export interface CreateSMBFileShareInput { * @public *The authentication method that users use to access the file share. The default is
* ActiveDirectory
.
Valid Values: ActiveDirectory
| GuestAccess
*
A list of up to 50 tags that can be assigned to the NFS file share. Each tag is a * key-value pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -1786,7 +1765,6 @@ export interface CreateSMBFileShareInput { /** * @public *
The name of the file share. Optional.
- * *
* FileShareName
must be set if an S3 prefix name is set in
@@ -1808,22 +1786,17 @@ export interface CreateSMBFileShareInput {
* generating an ObjectUploaded
notification. Because clients can make many small
* writes to files, it's best to set this parameter for as long as possible to avoid
* generating multiple notifications for the same file in a small time period.
* SettlingTimeInSeconds
has no effect on the timing of the object
* uploading to Amazon S3, only the timing of the notification.
The following example sets NotificationPolicy
on with
* SettlingTimeInSeconds
set to 60.
* \{\"Upload\": \{\"SettlingTimeInSeconds\": 60\}\}
*
The following example sets NotificationPolicy
off.
* \{\}
*
A JSON object containing one or more of the following fields:
- * *@@ -1916,7 +1888,6 @@ export interface CreateSnapshotInput { * @public *
A list of up to 50 tags that can be assigned to a snapshot. Each tag is a key-value * pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -2000,7 +1971,6 @@ export interface CreateSnapshotFromVolumeRecoveryPointInput { * @public *
A list of up to 50 tags that can be assigned to a snapshot. Each tag is a key-value * pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -2038,7 +2008,6 @@ export interface CreateSnapshotFromVolumeRecoveryPointOutput { /** * @public *
A JSON object containing one or more of the following fields:
- * *@@ -2096,7 +2065,6 @@ export interface CreateStorediSCSIVolumeInput { * @public *
Set to true
if you want to preserve the data on the local disk. Otherwise,
* set to false
to create an empty volume.
Valid Values: true
| false
*
arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume
.
* The target name must be unique across all volumes on a gateway.
- *
* If you don't specify a value, Storage Gateway uses the value that was previously * used for this volume as the new target name.
*/ @@ -2120,7 +2087,6 @@ export interface CreateStorediSCSIVolumeInput { *The network interface of the gateway on which to expose the iSCSI target. Only IPv4 * addresses are accepted. Use DescribeGatewayInformation to get a list of * the network interfaces available on a gateway.
- * *Valid Values: A valid IP address.
*/ NetworkInterfaceId: string | undefined; @@ -2130,7 +2096,6 @@ export interface CreateStorediSCSIVolumeInput { *Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
A list of up to 50 tags that can be assigned to a stored volume. Each tag is a key-value * pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -2185,13 +2149,32 @@ export interface CreateStorediSCSIVolumeOutput { /** * @public + * @enum */ -export type RetentionLockType = "COMPLIANCE" | "GOVERNANCE" | "NONE"; +export const RetentionLockType = { + COMPLIANCE: "COMPLIANCE", + GOVERNANCE: "GOVERNANCE", + NONE: "NONE", +} as const; /** * @public */ -export type TapeStorageClass = "DEEP_ARCHIVE" | "GLACIER"; +export type RetentionLockType = (typeof RetentionLockType)[keyof typeof RetentionLockType]; + +/** + * @public + * @enum + */ +export const TapeStorageClass = { + DEEP_ARCHIVE: "DEEP_ARCHIVE", + GLACIER: "GLACIER", +} as const; + +/** + * @public + */ +export type TapeStorageClass = (typeof TapeStorageClass)[keyof typeof TapeStorageClass]; /** * @public @@ -2231,7 +2214,6 @@ export interface CreateTapePoolInput { * @public *
A list of up to 50 tags that can be assigned to tape pool. Each tag is a key-value * pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -2271,7 +2253,6 @@ export interface CreateTapesInput { /** * @public *
The size, in bytes, of the virtual tapes that you want to create.
- * *The size must be aligned by gigabyte (1024*1024*1024 bytes).
*A unique identifier that you use to retry a request. If you retry a request, use the
* same ClientToken
you specified in the initial request.
Using the same ClientToken
prevents creating the tape multiple
* times.
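To make the idempotency guarantee concrete, a sketch where a retry reuses the exact token and therefore cannot create a second batch of tapes (the ARN and prefix are placeholders):

```ts
import {
  StorageGatewayClient,
  CreateTapesCommand,
} from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-1" });

const input = {
  GatewayARN: "arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B",
  TapeSizeInBytes: 100 * 1024 * 1024 * 1024, // aligned by gigabyte
  ClientToken: "create-tapes-0001", // reuse this exact token on retry
  NumTapesToCreate: 2,
  TapeBarcodePrefix: "TST", // 1-4 uppercase letters A-Z
};

await client.send(new CreateTapesCommand(input));
// Retrying with the identical ClientToken returns the same tapes
// rather than creating new ones.
await client.send(new CreateTapesCommand(input));
```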
A prefix that you append to the barcode of the virtual tape you are creating. This * prefix makes the barcode unique.
- * *The prefix must be 1-4 characters in length and must be one of the uppercase letters * from A to Z.
@@ -2313,7 +2292,6 @@ export interface CreateTapesInput { *Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
A list of up to 50 tags that can be assigned to a virtual tape. Each tag is a key-value * pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -2386,7 +2363,6 @@ export interface CreateTapeWithBarcodeInput { /** * @public *
The size, in bytes, of the virtual tape that you want to create.
- * *The size must be aligned by gigabyte (1024*1024*1024 bytes).
*The barcode that you want to assign to the tape.
- * *Barcodes cannot be reused. This includes barcodes used for tapes that have been * deleted.
@@ -2409,7 +2384,6 @@ export interface CreateTapeWithBarcodeInput { *Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
A list of up to 50 tags that can be assigned to a virtual tape that has a barcode. Each * tag is a key-value pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -2493,7 +2466,6 @@ export interface DeleteAutomaticTapeCreationPolicyOutput { /** * @public *
A JSON object containing the following fields:
- * *@@ -2514,7 +2486,6 @@ export interface DeleteBandwidthRateLimitInput { * @public *
One of the BandwidthType values that indicates the gateway bandwidth rate limit to * delete.
- * *Valid Values: UPLOAD
| DOWNLOAD
| ALL
*
A JSON object containing one or more of the following fields:
- * *
@@ -2602,7 +2572,6 @@ export interface DeleteFileShareInput {
* immediately and aborts all data uploads to Amazon Web Services. Otherwise, the file share is
* not deleted until all data is uploaded to Amazon Web Services. This process aborts the data
* upload process, and the file share enters the FORCE_DELETING
status.
Valid Values: true
| false
*
* InitiatorName: The iSCSI initiator that connects to * the target.
- * ** SecretToAuthenticateInitiator: The secret key that * the initiator (for example, the Windows client) must provide to participate in mutual * CHAP with the target.
- * ** SecretToAuthenticateTarget: The secret key that the * target must provide to participate in mutual CHAP with the initiator (e.g. Windows * client).
- * ** TargetARN: The Amazon Resource Name (ARN) of the * storage volume.
- * *The Media Access Control (MAC) address of the interface.
- * *This is currently unsupported and will not be returned in output.
*The type of endpoint for your gateway.
- * *Valid Values: STANDARD
| FIPS
*
A JSON object containing the following fields:
- * *@@ -3515,7 +3489,6 @@ export interface NFSFileShareInfo { /** * @public *
The status of the file share.
- * *Valid Values: CREATING
| UPDATING
| AVAILABLE
|
* DELETING
*
Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
You can specify LocationARN as a bucket ARN, access point ARN or access point alias, * as shown in the following examples.
- * *Bucket ARN:
*
* arn:aws:s3:::my-bucket/prefix/
*
Access point ARN:
*
* arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/
*
If you specify an access point, the bucket policy must be configured to delegate * access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.
- * *Access point alias:
*
* test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias
@@ -3594,7 +3562,6 @@ export interface NFSFileShareInfo {
* @public
*
The default storage class for objects put into an Amazon S3 bucket by the S3
* File Gateway. The default value is S3_STANDARD
. Optional.
Valid Values: S3_STANDARD
| S3_INTELLIGENT_TIERING
|
* S3_STANDARD_IA
| S3_ONEZONE_IA
*
The user mapped to the anonymous user. Valid options are the following:
- * *@@ -3641,7 +3607,6 @@ export interface NFSFileShareInfo { * @public *
A value that sets the write status of a file share. Set this value to true
* to set the write status to read-only, otherwise set to false
.
Valid Values: true
| false
*
A value that enables guessing of the MIME type for uploaded objects based on file
* extensions. Set this value to true
to enable MIME type guessing, otherwise set
* to false
. The default value is true
.
Valid Values: true
| false
*
true
, the requester pays
* the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays
* the cost of storing data.
- *
*
* RequesterPays
is a configuration for the S3 bucket that backs the file
* share, so make sure that the configuration on the file share is the same as the S3
* bucket configuration.
Valid Values: true
| false
*
The name of the file share. Optional.
- * *
* FileShareName
must be set if an S3 prefix name is set in
@@ -3710,22 +3671,17 @@ export interface NFSFileShareInfo {
* generating an ObjectUploaded
notification. Because clients can make many small
* writes to files, it's best to set this parameter for as long as possible to avoid
* generating multiple notifications for the same file in a small time period.
* SettlingTimeInSeconds
has no effect on the timing of the object
* uploading to Amazon S3, only the timing of the notification.
The following example sets NotificationPolicy
on with
* SettlingTimeInSeconds
set to 60.
* \{\"Upload\": \{\"SettlingTimeInSeconds\": 60\}\}
*
The following example sets NotificationPolicy
off.
* \{\}
*
The status of the file share.
- * *Valid Values: CREATING
| UPDATING
| AVAILABLE
|
* DELETING
*
Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
You can specify LocationARN as a bucket ARN, access point ARN or access point alias, * as shown in the following examples.
- * *Bucket ARN:
*
* arn:aws:s3:::my-bucket/prefix/
*
Access point ARN:
*
* arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/
*
If you specify an access point, the bucket policy must be configured to delegate * access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide.
- * *Access point alias:
*
* test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias
@@ -3888,7 +3838,6 @@ export interface SMBFileShareInfo {
* @public
*
The default storage class for objects put into an Amazon S3 bucket by the S3
* File Gateway. The default value is S3_STANDARD
. Optional.
Valid Values: S3_STANDARD
| S3_INTELLIGENT_TIERING
|
* S3_STANDARD_IA
| S3_ONEZONE_IA
*
A value that sets the write status of a file share. Set this value to true
* to set the write status to read-only, otherwise set to false
.
Valid Values: true
| false
*
A value that enables guessing of the MIME type for uploaded objects based on file
* extensions. Set this value to true
to enable MIME type guessing, otherwise set
* to false
. The default value is true
.
Valid Values: true
| false
*
true
, the requester pays
* the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays
* the cost of storing data.
- *
*
* RequesterPays
is a configuration for the S3 bucket that backs the file
* share, so make sure that the configuration on the file share is the same as the S3
* bucket configuration.
Valid Values: true
| false
*
If this value is set to true
, it indicates that access control list (ACL)
* is enabled on the SMB file share. If it is set to false
, it indicates that
* file and directory permissions are mapped to the POSIX permission.
For more information, see Using Microsoft Windows ACLs to * control access to an SMB file share in the Storage Gateway User * Guide.
@@ -4003,7 +3945,6 @@ export interface SMBFileShareInfo { * @public *The authentication method of the file share. The default is
* ActiveDirectory
.
Valid Values: ActiveDirectory
| GuestAccess
*
The name of the file share. Optional.
- * *
* FileShareName
must be set if an S3 prefix name is set in
@@ -4051,22 +3991,17 @@ export interface SMBFileShareInfo {
* generating an ObjectUploaded
notification. Because clients can make many small
* writes to files, it's best to set this parameter for as long as possible to avoid
* generating multiple notifications for the same file in a small time period.
* SettlingTimeInSeconds
has no effect on the timing of the object
* uploading to Amazon S3, only the timing of the notification.
The following example sets NotificationPolicy
on with
* SettlingTimeInSeconds
set to 60.
* \{\"Upload\": \{\"SettlingTimeInSeconds\": 60\}\}
*
The following example sets NotificationPolicy
off.
* \{\}
*
Indicates the status of a gateway that is a member of the Active Directory * domain.
- * *@@ -4220,7 +4164,6 @@ export interface DescribeSMBSettingsOutput { * @public *
This value is true
if a password for the guest user smbguest
* is set, otherwise false
. Only supported for S3 File Gateways.
Valid Values: true
| false
*
The type of security strategy that was specified for the file gateway.
- * *@@ -4410,7 +4352,6 @@ export interface StorediSCSIVolume { * @public *
Indicates whether existing data on the underlying local * disk was preserved when the stored volume was created.
- * *Valid Values: true
| false
*
VolumeUsedInBytes
is different from the compressed size of the
* volume, which is the value that is used to calculate your bill.
- *
* This value is not available for volumes created prior to May 13, 2015, until you * store data on the volume.
@@ -4459,7 +4399,6 @@ export interface StorediSCSIVolume { * myvolume results in the target ARN of *arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume
.
* The target name must be unique across all volumes on a gateway.
- *
* If you don't specify a value, Storage Gateway uses the value that was previously * used for this volume as the new target name.
*/ @@ -4474,7 +4413,6 @@ export interface DescribeStorediSCSIVolumesOutput { * @public *Describes a single unit of output from DescribeStorediSCSIVolumes. The * following fields are returned:
- * *@@ -4619,7 +4557,6 @@ export interface TapeArchive { /** * @public *
The time that the archiving of the virtual tape was completed.
- * *The default timestamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' * format.
*/ @@ -4629,7 +4566,6 @@ export interface TapeArchive { * @public *The Amazon Resource Name (ARN) of the tape gateway that the virtual tape is being * retrieved to.
- * *The virtual tape is retrieved from the virtual tape shelf (VTS).
*/ RetrievedTo?: string; @@ -4643,7 +4579,6 @@ export interface TapeArchive { /** * @public *The size, in bytes, of data stored on the virtual tape.
- * *This value is not available for tapes created prior to May 13, 2015.
*The time that the tape entered the custom tape pool.
- * *The default timestamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' * format.
*/ @@ -4754,7 +4688,6 @@ export interface TapeRecoveryPointInfo { * @public *The time when the point-in-time view of the virtual tape was replicated for later * recovery.
- * *The default timestamp format of the tape recovery point time is in the ISO8601 extended * YYYY-MM-DD'T'HH:MM:SS'Z' format.
*/ @@ -4795,7 +4728,6 @@ export interface DescribeTapeRecoveryPointsOutput { * @public *An opaque string that indicates the position at which the virtual tape recovery points * that were listed for description ended.
- * *Use this marker in your next request to list the next set of virtual tape recovery * points in the list. If there are no more recovery points to describe, this field does not * appear in the response.
@@ -4827,7 +4759,6 @@ export interface DescribeTapesInput { * @public *A marker value, obtained in a previous call to DescribeTapes
. This marker
* indicates which page of results to retrieve.
If not specified, the first page of results is retrieved.
*/ Marker?: string; @@ -4836,7 +4767,6 @@ export interface DescribeTapesInput { * @public *Specifies that the number of virtual tapes described be limited to the specified * number.
- * *Amazon Web Services may impose its own limit, if this field is not set.
*For archiving virtual tapes, indicates how much data remains to be uploaded before * archiving is complete.
- * *Range: 0 (not started) to 100 (complete).
*/ Progress?: number; @@ -4897,7 +4826,6 @@ export interface Tape { /** * @public *The size, in bytes, of data stored on the virtual tape.
- * *This value is not available for tapes created prior to May 13, 2015.
*An opaque string that can be used as part of a subsequent DescribeTapes
* call to retrieve the next page of results.
If a response does not contain a marker, then there are no more results to be * retrieved.
*/ @@ -5023,7 +4950,6 @@ export interface DescribeVTLDevicesInput { * @public *An array of strings, where each string represents the Amazon Resource Name (ARN) of a * VTL device.
- * *All of the specified VTL devices must be from the same gateway. If no VTL devices are * specified, the result will contain all devices on the specified gateway.
@@ -5209,7 +5135,6 @@ export interface DetachVolumeInput { * and detach the volume. The default isfalse
. If this value is set to
* false
, you must manually disconnect the iSCSI connection from the target
* volume.
- *
* Valid Values: true
| false
*
One of the DiskAllocationType
enumeration values that identifies how a
* local disk is used.
Valid Values: UPLOAD_BUFFER
| CACHE_STORAGE
*
The status of the file share.
- * *Valid Values: CREATING
| UPDATING
| AVAILABLE
|
* DELETING
*
The state of the gateway.
- * *Valid Values: DISABLED
| ACTIVE
*
Indicates the status of the gateway as a member of the Active Directory domain.
- * *@@ -5743,7 +5673,6 @@ export interface ListFileSystemAssociationsOutput { /** * @public *
A JSON object containing zero or more of the following fields:
- * *@@ -5818,7 +5747,6 @@ export interface ListLocalDisksOutput { /** * @public *
A JSON object containing the following fields:
- * *@@ -5906,10 +5834,19 @@ export interface ListTapePoolsInput { Limit?: number; } +/** + * @public + * @enum + */ +export const PoolStatus = { + ACTIVE: "ACTIVE", + DELETED: "DELETED", +} as const; + /** * @public */ -export type PoolStatus = "ACTIVE" | "DELETED"; +export type PoolStatus = (typeof PoolStatus)[keyof typeof PoolStatus]; /** * @public @@ -5987,7 +5924,6 @@ export interface ListTapePoolsOutput { /** * @public *
A JSON object that contains one or more of the following fields:
- * *@@ -6090,7 +6026,6 @@ export interface TapeInfo { /** * @public *
A JSON object containing the following fields:
- * *@@ -6180,7 +6115,6 @@ export interface VolumeRecoveryPointInfo { /** * @public *
The size of the data stored on the volume in bytes.
- * *This value is not available for volumes created prior to May 13, 2015, until you * store data on the volume.
@@ -6216,7 +6150,6 @@ export interface ListVolumeRecoveryPointsOutput { /** * @public *A JSON object that contains one or more of the following fields:
- * *@@ -6262,11 +6195,9 @@ export interface VolumeInfo { * @public *
The Amazon Resource Name (ARN) for the storage volume. For example, the following is a * valid ARN:
- * *
* arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB
*
Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).
*/ VolumeARN?: string; @@ -6275,7 +6206,6 @@ export interface VolumeInfo { * @public *The unique identifier assigned to the volume. This ID becomes part of the volume Amazon * Resource Name (ARN), which you use as input for other operations.
- * *Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).
*/ VolumeId?: string; @@ -6292,7 +6222,6 @@ export interface VolumeInfo { *The unique identifier assigned to your gateway during activation. This ID becomes part * of the gateway Amazon Resource Name (ARN), which you use as input for other * operations.
- * *Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).
*/ GatewayId?: string; @@ -6306,7 +6235,6 @@ export interface VolumeInfo { /** * @public *The size of the volume in bytes.
- * *Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).
*/ VolumeSizeInBytes?: number; @@ -6321,7 +6249,6 @@ export interface VolumeInfo { /** * @public *A JSON object containing the following fields:
- * *
@@ -6416,7 +6343,6 @@ export interface RefreshCacheInput {
* FolderList
are not refreshed. Only objects that are in folders listed
* directly under FolderList
are found and used for the update. The default is
* true
.
Valid Values: true
| false
*
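A sketch of the FolderList/Recursive interaction described above (the file share ARN is a placeholder):

```ts
import {
  StorageGatewayClient,
  RefreshCacheCommand,
} from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-1" });

await client.send(
  new RefreshCacheCommand({
    FileShareARN:
      "arn:aws:storagegateway:us-east-1:111122223333:share/share-12345678",
    FolderList: ["/reports"], // only objects directly under /reports...
    Recursive: false, // ...are refreshed; subfolders are skipped
  })
);
```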
The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual tape to. * Use the ListGateways operation to return a list of gateways for your * account and Amazon Web Services Region.
- * *You retrieve archived virtual tapes to only one gateway and the gateway must be a tape * gateway.
*/ @@ -6741,7 +6666,6 @@ export interface UpdateAutomaticTapeCreationPolicyOutput { /** * @public *A JSON object containing one or more of the following fields:
- * *@@ -6824,7 +6748,6 @@ export interface UpdateBandwidthRateLimitScheduleOutput { /** * @public *
A JSON object containing one or more of the following fields:
- * *@@ -6840,7 +6763,6 @@ export interface UpdateBandwidthRateLimitScheduleOutput { *
- * *@@ -6861,7 +6783,6 @@ export interface UpdateChapCredentialsInput { * @public *
The secret key that the initiator (for example, the Windows client) must provide to * participate in mutual CHAP with the target.
- * *The secret key must be between 12 and 16 bytes when encoded in UTF-8.
*The secret key that the target must provide to participate in mutual CHAP with the * initiator (e.g. Windows client).
- * *Byte constraints: Minimum bytes of 12. Maximum bytes of 16.
- * *The secret key must be between 12 and 16 bytes when encoded in UTF-8.
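A sketch tying the CHAP fields together; every value is a placeholder, and both secrets respect the 12-16 byte UTF-8 constraint noted above:

```ts
import {
  StorageGatewayClient,
  UpdateChapCredentialsCommand,
} from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-1" });

await client.send(
  new UpdateChapCredentialsCommand({
    TargetARN:
      "arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume",
    InitiatorName: "iqn.1991-05.com.microsoft:winclient",
    SecretToAuthenticateInitiator: "initiatorpass123", // 16 bytes
    SecretToAuthenticateTarget: "targetpass123456", // 16 bytes, for mutual CHAP
  })
);
```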
*The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use * to monitor and log events in the gateway.
- * *For more information, see What is Amazon CloudWatch * Logs? *
@@ -7048,7 +6966,6 @@ export interface UpdateGatewaySoftwareNowOutput { /** * @public *A JSON object containing the following fields:
- * *@@ -7142,7 +7059,6 @@ export interface UpdateNFSFileShareInput { *
Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
The default storage class for objects put into an Amazon S3 bucket by the S3
* File Gateway. The default value is S3_STANDARD
. Optional.
Valid Values: S3_STANDARD
| S3_INTELLIGENT_TIERING
|
* S3_STANDARD_IA
| S3_ONEZONE_IA
*
The user mapped to the anonymous user.
- * *Valid values are the following:
- * *@@ -7213,7 +7126,6 @@ export interface UpdateNFSFileShareInput { * @public *
A value that sets the write status of a file share. Set this value to true
* to set the write status to read-only, otherwise set to false
.
Valid Values: true
| false
*
A value that enables guessing of the MIME type for uploaded objects based on file
* extensions. Set this value to true
to enable MIME type guessing, otherwise set
* to false
. The default value is true
.
Valid Values: true
| false
*
true
, the requester pays
* the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays
* the cost of storing data.
- *
*
* RequesterPays
is a configuration for the S3 bucket that backs the file
* share, so make sure that the configuration on the file share is the same as the S3
* bucket configuration.
Valid Values: true
| false
*
The name of the file share. Optional.
- * *
* FileShareName
must be set if an S3 prefix name is set in
@@ -7274,22 +7182,17 @@ export interface UpdateNFSFileShareInput {
* generating an ObjectUploaded
notification. Because clients can make many small
* writes to files, it's best to set this parameter for as long as possible to avoid
* generating multiple notifications for the same file in a small time period.
* SettlingTimeInSeconds
has no effect on the timing of the object
* uploading to Amazon S3, only the timing of the notification.
The following example sets NotificationPolicy
on with
* SettlingTimeInSeconds
set to 60.
* \{\"Upload\": \{\"SettlingTimeInSeconds\": 60\}\}
*
The following example sets NotificationPolicy
off.
* \{\}
*
Set to true
to use Amazon S3 server-side encryption with your own
* KMS key, or false
to use a key managed by Amazon S3.
* Optional.
Valid Values: true
| false
*
The default storage class for objects put into an Amazon S3 bucket by the S3
* File Gateway. The default value is S3_STANDARD
. Optional.
Valid Values: S3_STANDARD
| S3_INTELLIGENT_TIERING
|
* S3_STANDARD_IA
| S3_ONEZONE_IA
*
A value that sets the write status of a file share. Set this value to true
* to set the write status to read-only, otherwise set to false
.
Valid Values: true
| false
*
A value that enables guessing of the MIME type for uploaded objects based on file
* extensions. Set this value to true
to enable MIME type guessing, otherwise set
* to false
. The default value is true
.
Valid Values: true
| false
*
true
, the requester pays
* the costs; otherwise, the S3 bucket owner pays. However, the S3 bucket owner always pays
* the cost of storing data.
- *
*
* RequesterPays
is a configuration for the S3 bucket that backs the file
* share, so make sure that the configuration on the file share is the same as the S3
* bucket configuration.
Valid Values: true
| false
*
Set this value to true
to enable access control list (ACL) on the SMB file
* share. Set it to false
to map file and directory permissions to the POSIX
* permissions.
For more information, see Using Microsoft Windows ACLs to * control access to an SMB file share in the Storage Gateway User * Guide.
- * *Valid Values: true
| false
*
The name of the file share. Optional.
- * *
* FileShareName
must be set if an S3 prefix name is set in
@@ -7496,22 +7388,17 @@ export interface UpdateSMBFileShareInput {
* generating an ObjectUploaded
notification. Because clients can make many small
* writes to files, it's best to set this parameter for as long as possible to avoid
* generating multiple notifications for the same file in a small time period.
* SettlingTimeInSeconds
has no effect on the timing of the object
* uploading to Amazon S3, only the timing of the notification.
The following example sets NotificationPolicy
on with
* SettlingTimeInSeconds
set to 60.
* \{\"Upload\": \{\"SettlingTimeInSeconds\": 60\}\}
*
The following example sets NotificationPolicy
off.
* \{\}
*
Specifies the type of security strategy.
- * *ClientSpecified: if you use this option, requests are established based on what is * negotiated by the client. This option is recommended when you want to maximize * compatibility across different clients in your environment. Supported only in S3 File * Gateway.
- * *MandatorySigning: if you use this option, file gateway only allows connections from * SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on * Microsoft Windows Vista, Windows Server 2008 or newer.
- * *MandatoryEncryption: if you use this option, file gateway only allows connections from * SMBv3 clients that have encryption enabled. This option is highly recommended for * environments that handle sensitive data. This option works with SMB clients on Microsoft @@ -7651,7 +7535,6 @@ export interface UpdateSMBSecurityStrategyOutput { /** * @public *
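A sketch of selecting one of the three strategies described above (the gateway ARN is a placeholder):

```ts
import {
  StorageGatewayClient,
  UpdateSMBSecurityStrategyCommand,
} from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-1" });

await client.send(
  new UpdateSMBSecurityStrategyCommand({
    GatewayARN: "arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B",
    // Strictest option: only SMBv3 clients with encryption may connect.
    SMBSecurityStrategy: "MandatoryEncryption",
  })
);
```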
A JSON object containing one or more of the following fields:
- * *@@ -7707,7 +7590,6 @@ export interface UpdateSnapshotScheduleInput { * @public *
A list of up to 50 tags that can be assigned to a snapshot. Each tag is a key-value * pair.
- * *Valid characters for key and value are letters, spaces, and numbers representable in * UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length @@ -7745,7 +7627,6 @@ export interface UpdateVTLDeviceTypeInput { /** * @public *
The type of medium changer you want to select.
- * *Valid Values: STK-L700
| AWS-Gateway-VTL
|
* IBM-03584L32-0402
*
Before you create and manage canaries, be aware of the security considerations. For more information, see Security Considerations for Synthetics Canaries.
diff --git a/clients/client-synthetics/src/Synthetics.ts b/clients/client-synthetics/src/Synthetics.ts index 586a9d6a8f879..88240ed18e832 100644 --- a/clients/client-synthetics/src/Synthetics.ts +++ b/clients/client-synthetics/src/Synthetics.ts @@ -396,7 +396,6 @@ export interface Synthetics { * information, see Using ServiceLens to Monitor * the Health of Your Applications in the Amazon CloudWatch User * Guide. - * *Before you create and manage canaries, be aware of the security considerations. For more * information, see Security * Considerations for Synthetics Canaries.
diff --git a/clients/client-synthetics/src/SyntheticsClient.ts b/clients/client-synthetics/src/SyntheticsClient.ts index e0faff209b5e0..d56bfe9656f4c 100644 --- a/clients/client-synthetics/src/SyntheticsClient.ts +++ b/clients/client-synthetics/src/SyntheticsClient.ts @@ -331,7 +331,6 @@ export interface SyntheticsClientResolvedConfig extends SyntheticsClientResolved * information, see Using ServiceLens to Monitor * the Health of Your Applications in the Amazon CloudWatch User * Guide. - * *Before you create and manage canaries, be aware of the security considerations. For more * information, see Security * Considerations for Synthetics Canaries.
diff --git a/clients/client-synthetics/src/commands/CreateCanaryCommand.ts b/clients/client-synthetics/src/commands/CreateCanaryCommand.ts index 47e06800747d5..ed7e650fa3e56 100644 --- a/clients/client-synthetics/src/commands/CreateCanaryCommand.ts +++ b/clients/client-synthetics/src/commands/CreateCanaryCommand.ts @@ -95,7 +95,7 @@ export interface CreateCanaryCommandOutput extends CreateCanaryResponse, __Metad * }, * ArtifactConfig: { // ArtifactConfigInput * S3Encryption: { // S3EncryptionConfig - * EncryptionMode: "STRING_VALUE", + * EncryptionMode: "SSE_S3" || "SSE_KMS", * KmsKeyArn: "STRING_VALUE", * }, * }, @@ -123,9 +123,9 @@ export interface CreateCanaryCommandOutput extends CreateCanaryResponse, __Metad * // SuccessRetentionPeriodInDays: Number("int"), * // FailureRetentionPeriodInDays: Number("int"), * // Status: { // CanaryStatus - * // State: "STRING_VALUE", + * // State: "CREATING" || "READY" || "STARTING" || "RUNNING" || "UPDATING" || "STOPPING" || "STOPPED" || "ERROR" || "DELETING", * // StateReason: "STRING_VALUE", - * // StateReasonCode: "STRING_VALUE", + * // StateReasonCode: "INVALID_PERMISSIONS" || "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "UPDATE_PENDING" || "UPDATE_IN_PROGRESS" || "UPDATE_COMPLETE" || "ROLLBACK_COMPLETE" || "ROLLBACK_FAILED" || "DELETE_IN_PROGRESS" || "DELETE_FAILED" || "SYNC_DELETE_IN_PROGRESS", * // }, * // Timeline: { // CanaryTimeline * // Created: new Date("TIMESTAMP"), @@ -161,7 +161,7 @@ export interface CreateCanaryCommandOutput extends CreateCanaryResponse, __Metad * // }, * // ArtifactConfig: { // ArtifactConfigOutput * // S3Encryption: { // S3EncryptionConfig - * // EncryptionMode: "STRING_VALUE", + * // EncryptionMode: "SSE_S3" || "SSE_KMS", * // KmsKeyArn: "STRING_VALUE", * // }, * // }, diff --git a/clients/client-synthetics/src/commands/DeleteCanaryCommand.ts b/clients/client-synthetics/src/commands/DeleteCanaryCommand.ts index 61c61b11852bd..837d55d9f80df 100644 --- a/clients/client-synthetics/src/commands/DeleteCanaryCommand.ts +++ b/clients/client-synthetics/src/commands/DeleteCanaryCommand.ts @@ -64,7 +64,6 @@ export interface DeleteCanaryCommandOutput extends DeleteCanaryResponse, __Metad * . *Before you delete a canary, you might want to use GetCanary
to display
* the information about this canary. Make
* note of the information returned by this operation so that you can delete these resources
diff --git a/clients/client-synthetics/src/commands/DescribeCanariesCommand.ts b/clients/client-synthetics/src/commands/DescribeCanariesCommand.ts
index 4a7d0acd1cc42..8bf6bbe54d83e 100644
--- a/clients/client-synthetics/src/commands/DescribeCanariesCommand.ts
+++ b/clients/client-synthetics/src/commands/DescribeCanariesCommand.ts
@@ -83,9 +83,9 @@ export interface DescribeCanariesCommandOutput extends DescribeCanariesResponse,
* // SuccessRetentionPeriodInDays: Number("int"),
* // FailureRetentionPeriodInDays: Number("int"),
* // Status: { // CanaryStatus
- * // State: "STRING_VALUE",
+ * // State: "CREATING" || "READY" || "STARTING" || "RUNNING" || "UPDATING" || "STOPPING" || "STOPPED" || "ERROR" || "DELETING",
* // StateReason: "STRING_VALUE",
- * // StateReasonCode: "STRING_VALUE",
+ * // StateReasonCode: "INVALID_PERMISSIONS" || "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "UPDATE_PENDING" || "UPDATE_IN_PROGRESS" || "UPDATE_COMPLETE" || "ROLLBACK_COMPLETE" || "ROLLBACK_FAILED" || "DELETE_IN_PROGRESS" || "DELETE_FAILED" || "SYNC_DELETE_IN_PROGRESS",
* // },
* // Timeline: { // CanaryTimeline
* // Created: new Date("TIMESTAMP"),
@@ -121,7 +121,7 @@ export interface DescribeCanariesCommandOutput extends DescribeCanariesResponse,
* // },
* // ArtifactConfig: { // ArtifactConfigOutput
* // S3Encryption: { // S3EncryptionConfig
- * // EncryptionMode: "STRING_VALUE",
+ * // EncryptionMode: "SSE_S3" || "SSE_KMS",
* // KmsKeyArn: "STRING_VALUE",
* // },
* // },
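With the EncryptionMode literals now spelled out in the generated snippets, here is a sketch of a CreateCanary call using SSE_KMS; names, buckets, and ARNs are placeholders:

```ts
import { SyntheticsClient, CreateCanaryCommand } from "@aws-sdk/client-synthetics";

const client = new SyntheticsClient({ region: "us-west-2" });

await client.send(
  new CreateCanaryCommand({
    Name: "my-canary",
    Code: { Handler: "index.handler", S3Bucket: "my-canary-code", S3Key: "canary.zip" },
    ArtifactS3Location: "s3://my-canary-artifacts/",
    ExecutionRoleArn: "arn:aws:iam::111122223333:role/CanaryExecutionRole",
    Schedule: { Expression: "rate(5 minutes)" },
    RuntimeVersion: "syn-nodejs-puppeteer-3.9",
    ArtifactConfig: {
      S3Encryption: {
        EncryptionMode: "SSE_KMS", // "SSE_S3" | "SSE_KMS"
        KmsKeyArn: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-0000-0000-0000-000000000000",
      },
    },
  })
);
```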
diff --git a/clients/client-synthetics/src/commands/DescribeCanariesLastRunCommand.ts b/clients/client-synthetics/src/commands/DescribeCanariesLastRunCommand.ts
index de6b37f9305c5..c95243a4b5488 100644
--- a/clients/client-synthetics/src/commands/DescribeCanariesLastRunCommand.ts
+++ b/clients/client-synthetics/src/commands/DescribeCanariesLastRunCommand.ts
@@ -37,7 +37,6 @@ export interface DescribeCanariesLastRunCommandOutput extends DescribeCanariesLa
/**
* @public
*
Use this operation to see information from the most recent run of each canary that you have created.
- * *This operation supports resource-level authorization using an IAM policy and
* the Names
parameter. If you specify the Names
parameter, the operation is successful only if you have authorization to view
* all the canaries that you specify in your request. If you do not have permission to view any of
@@ -69,9 +68,9 @@ export interface DescribeCanariesLastRunCommandOutput extends DescribeCanariesLa
* // Id: "STRING_VALUE",
* // Name: "STRING_VALUE",
* // Status: { // CanaryRunStatus
- * // State: "STRING_VALUE",
+ * // State: "RUNNING" || "PASSED" || "FAILED",
* // StateReason: "STRING_VALUE",
- * // StateReasonCode: "STRING_VALUE",
+ * // StateReasonCode: "CANARY_FAILURE" || "EXECUTION_FAILURE",
* // },
* // Timeline: { // CanaryRunTimeline
* // Started: new Date("TIMESTAMP"),
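A sketch of the resource-level authorization note above: passing Names scopes the call so the caller only needs permission for the listed canaries (the name is a placeholder):

```ts
import {
  SyntheticsClient,
  DescribeCanariesLastRunCommand,
} from "@aws-sdk/client-synthetics";

const client = new SyntheticsClient({ region: "us-west-2" });

const { CanariesLastRun } = await client.send(
  new DescribeCanariesLastRunCommand({
    Names: ["my-canary"], // succeeds only if authorized for every listed canary
    MaxResults: 5,
  })
);
for (const canary of CanariesLastRun ?? []) {
  console.log(canary.CanaryName, canary.LastRun?.Status?.State);
}
```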
diff --git a/clients/client-synthetics/src/commands/GetCanaryCommand.ts b/clients/client-synthetics/src/commands/GetCanaryCommand.ts
index 9bb92b9fb13b7..3fb09d090a8dc 100644
--- a/clients/client-synthetics/src/commands/GetCanaryCommand.ts
+++ b/clients/client-synthetics/src/commands/GetCanaryCommand.ts
@@ -71,9 +71,9 @@ export interface GetCanaryCommandOutput extends GetCanaryResponse, __MetadataBea
* // SuccessRetentionPeriodInDays: Number("int"),
* // FailureRetentionPeriodInDays: Number("int"),
* // Status: { // CanaryStatus
- * // State: "STRING_VALUE",
+ * // State: "CREATING" || "READY" || "STARTING" || "RUNNING" || "UPDATING" || "STOPPING" || "STOPPED" || "ERROR" || "DELETING",
* // StateReason: "STRING_VALUE",
- * // StateReasonCode: "STRING_VALUE",
+ * // StateReasonCode: "INVALID_PERMISSIONS" || "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "UPDATE_PENDING" || "UPDATE_IN_PROGRESS" || "UPDATE_COMPLETE" || "ROLLBACK_COMPLETE" || "ROLLBACK_FAILED" || "DELETE_IN_PROGRESS" || "DELETE_FAILED" || "SYNC_DELETE_IN_PROGRESS",
* // },
* // Timeline: { // CanaryTimeline
* // Created: new Date("TIMESTAMP"),
@@ -109,7 +109,7 @@ export interface GetCanaryCommandOutput extends GetCanaryResponse, __MetadataBea
* // },
* // ArtifactConfig: { // ArtifactConfigOutput
* // S3Encryption: { // S3EncryptionConfig
- * // EncryptionMode: "STRING_VALUE",
+ * // EncryptionMode: "SSE_S3" || "SSE_KMS",
* // KmsKeyArn: "STRING_VALUE",
* // },
* // },
diff --git a/clients/client-synthetics/src/commands/GetCanaryRunsCommand.ts b/clients/client-synthetics/src/commands/GetCanaryRunsCommand.ts
index b0d7f0bc4cb15..1375016694d37 100644
--- a/clients/client-synthetics/src/commands/GetCanaryRunsCommand.ts
+++ b/clients/client-synthetics/src/commands/GetCanaryRunsCommand.ts
@@ -56,9 +56,9 @@ export interface GetCanaryRunsCommandOutput extends GetCanaryRunsResponse, __Met
* // Id: "STRING_VALUE",
* // Name: "STRING_VALUE",
* // Status: { // CanaryRunStatus
- * // State: "STRING_VALUE",
+ * // State: "RUNNING" || "PASSED" || "FAILED",
* // StateReason: "STRING_VALUE",
- * // StateReasonCode: "STRING_VALUE",
+ * // StateReasonCode: "CANARY_FAILURE" || "EXECUTION_FAILURE",
* // },
* // Timeline: { // CanaryRunTimeline
* // Started: new Date("TIMESTAMP"),
diff --git a/clients/client-synthetics/src/commands/UpdateCanaryCommand.ts b/clients/client-synthetics/src/commands/UpdateCanaryCommand.ts
index 74c050c369f47..79436449ac380 100644
--- a/clients/client-synthetics/src/commands/UpdateCanaryCommand.ts
+++ b/clients/client-synthetics/src/commands/UpdateCanaryCommand.ts
@@ -94,7 +94,7 @@ export interface UpdateCanaryCommandOutput extends UpdateCanaryResponse, __Metad
* ArtifactS3Location: "STRING_VALUE",
* ArtifactConfig: { // ArtifactConfigInput
* S3Encryption: { // S3EncryptionConfig
- * EncryptionMode: "STRING_VALUE",
+ * EncryptionMode: "SSE_S3" || "SSE_KMS",
* KmsKeyArn: "STRING_VALUE",
* },
* },
diff --git a/clients/client-synthetics/src/endpoint/ruleset.ts b/clients/client-synthetics/src/endpoint/ruleset.ts
index ad5c3749f7b7a..b251f12d68cfa 100644
--- a/clients/client-synthetics/src/endpoint/ruleset.ts
+++ b/clients/client-synthetics/src/endpoint/ruleset.ts
@@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/synthetics.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://synthetics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://synthetics-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://synthetics.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://synthetics.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://synthetics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://synthetics-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://synthetics.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://synthetics.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-synthetics/src/index.ts b/clients/client-synthetics/src/index.ts
index ffd27b88c7749..46b1b623f3f12 100644
--- a/clients/client-synthetics/src/index.ts
+++ b/clients/client-synthetics/src/index.ts
@@ -13,7 +13,6 @@
* information, see Using ServiceLens to Monitor
* the Health of Your Applications in the Amazon CloudWatch User
* Guide.
Before you create and manage canaries, be aware of the security considerations. For more * information, see Security * Considerations for Synthetics Canaries.
diff --git a/clients/client-synthetics/src/models/models_0.ts b/clients/client-synthetics/src/models/models_0.ts index a00fb0592a14a..8fab2cecd7261 100644 --- a/clients/client-synthetics/src/models/models_0.ts +++ b/clients/client-synthetics/src/models/models_0.ts @@ -23,7 +23,7 @@ export type EncryptionMode = (typeof EncryptionMode)[keyof typeof EncryptionMode * uploads to Amazon S3. *For more information, see * Encrypting canary artifacts - *
+ * */ export interface S3EncryptionConfig { /** @@ -1831,7 +1831,6 @@ export interface UntagResourceResponse {} * @public *An object that specifies what screenshots to use as a baseline for visual monitoring by this canary. It can * optionally also specify parts of the screenshots to ignore during the visual monitoring comparison.
- * *Visual monitoring is supported only on canaries running the syn-puppeteer-node-3.2
* runtime or later. For more information, see
* Visual monitoring and
diff --git a/clients/client-textract/src/endpoint/ruleset.ts b/clients/client-textract/src/endpoint/ruleset.ts
index 48ffca23343b5..6abf0a919e34d 100644
--- a/clients/client-textract/src/endpoint/ruleset.ts
+++ b/clients/client-textract/src/endpoint/ruleset.ts
@@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"
n=[i],
o=[j],
p=[{[t]:"Region"}];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://textract-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://textract-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://textract.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://textract.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]};
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://textract-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://textract-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://textract.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://textract.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-timestream-query/src/TimestreamQuery.ts b/clients/client-timestream-query/src/TimestreamQuery.ts
index 03e80df37829b..4c8e959ea0ca9 100644
--- a/clients/client-timestream-query/src/TimestreamQuery.ts
+++ b/clients/client-timestream-query/src/TimestreamQuery.ts
@@ -270,7 +270,7 @@ export interface TimestreamQuery {
* @public
* DescribeEndpoints returns a list of available endpoints to make Timestream
* API calls against. This API is available through both Write and Query. Because the Timestream SDKs are designed to transparently work with the
+ * Because the Timestream SDKs are designed to transparently work with the
* service’s architecture, including the management and mapping of the service endpoints,
* it is not recommended that you use this API unless: You are using VPC endpoints (Amazon Web Services PrivateLink) with Timestream
+ * You are using VPC endpoints (Amazon Web Services PrivateLink) with Timestream
*
* Your application uses a programming language that does not yet have SDK
+ * Your application uses a programming language that does not yet have SDK
* support You require better control over the client-side implementation You require better control over the client-side implementation For detailed information on how and when to use and implement DescribeEndpoints, see
+ * For detailed information on how and when to use and implement DescribeEndpoints, see
* The Endpoint Discovery Pattern.
+ *
*
- *
Your query request will fail in the following cases:
- *Your query request will fail in the following cases:
+ * If you submit a Query
request with the same client token outside
+ *
If you submit a Query
request with the same client token outside
* of the 5-minute idempotency window.
If you submit a Query
request with the same client token, but
+ *
If you submit a Query
request with the same client token, but
* change other parameters, within the 5-minute idempotency window.
If the size of the row (including the query metadata) exceeds 1 MB, then the + *
If the size of the row (including the query metadata) exceeds 1 MB, then the * query will fail with the following error message:
- *+ *
* Query aborted as max page response size has been exceeded by the output
* result row
- *
If the IAM principal of the query initiator and the result reader are not the + *
If the IAM principal of the query initiator and the result reader are not the
* same and/or the query initiator and the result reader do not have the same query
* string in the query requests, the query will fail with an Invalid
* pagination token
error.
The query string to run. Parameter
* names can be specified in the query string by the @
character followed by an
* identifier. The named Parameter @scheduled_runtime
is reserved and can be used in the query to get the time at which the query is scheduled to run.
The timestamp calculated according to the ScheduleConfiguration parameter will be the value of @scheduled_runtime
parameter for each query run.
+ *
The timestamp calculated according to the ScheduleConfiguration parameter will be the value of @scheduled_runtime
parameter for each query run.
* For example, consider an instance of a scheduled query executing on 2021-12-01 00:00:00. For this instance, the @scheduled_runtime
parameter is
* initialized to the timestamp 2021-12-01 00:00:00 when invoking the query.
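A sketch of a QueryString that leans on the reserved parameter; the database, table, and measure names are invented for illustration:

```ts
// Aggregate the 10 minutes leading up to each scheduled run.
const QueryString = `
  SELECT region, AVG(measure_value::double) AS avg_cpu
  FROM "mydb"."metrics"
  WHERE time BETWEEN @scheduled_runtime - 10m AND @scheduled_runtime
  GROUP BY region
`;
```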
If CreateScheduledQuery is called without a ClientToken
, the
+ *
If CreateScheduledQuery is called without a ClientToken
, the
* Query SDK generates a ClientToken
on your behalf.
After 8 hours, any request with the same ClientToken
is treated
+ *
After 8 hours, any request with the same ClientToken
is treated
* as a new request.
If ErrorReportConfiguration uses SSE_KMS
as encryption type, the same KmsKeyId is used to encrypt the error report at rest.
If ErrorReportConfiguration uses SSE_KMS
as encryption type, the same KmsKeyId is used to encrypt the error report at rest.
 * Query requests has the same effect as making a single request. When using
 * ClientToken in a query, note the following:
 *   - If the Query API is instantiated without a ClientToken, the Query SDK
 *     generates a ClientToken on your behalf.
 *   - If the Query invocation only contains the ClientToken but does not include
 *     a NextToken, that invocation of Query is assumed to be a new query run.
 *   - If the invocation contains NextToken, that particular invocation is assumed
 *     to be a subsequent invocation of a prior call to the Query API, and a
 *     result set is returned.
 *   - After 4 hours, any request with the same ClientToken is treated as a new
 *     request.
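
A minimal sketch of the idempotent call pattern described above (the token value
is generated client-side; query string and region are illustrative):

```ts
import { TimestreamQueryClient, QueryCommand } from "@aws-sdk/client-timestream-query";
import { randomUUID } from "node:crypto";

const client = new TimestreamQueryClient({ region: "us-east-1" });
const clientToken = randomUUID();

// First invocation starts a new query run.
const first = await client.send(
  new QueryCommand({ QueryString: "SELECT 1", ClientToken: clientToken })
);

// Re-sending the same token with identical parameters inside the idempotency
// window targets the same query run instead of starting a new one; changing
// any other parameter within the window would make the request fail.
const retry = await client.send(
  new QueryCommand({ QueryString: "SELECT 1", ClientToken: clientToken })
);
```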
 * Query invocation only contains the ClientToken, that invocation of Query is
 * assumed to be a new query run.
 * Note the following when using NextToken in a query:
 *   - A pagination token can be used for up to five Query invocations, OR for a
 *     duration of up to 1 hour – whichever comes first.
 *   - Using the same NextToken will return the same set of records. To keep
 *     paginating through the result set, you must use the most recent nextToken.
 *   - Suppose a Query invocation returns two NextToken values, TokenA and TokenB.
 *     If TokenB is used in a subsequent Query invocation, then TokenA is
 *     invalidated and cannot be reused.
 *   - To request a previous result set from a query after pagination has begun,
 *     you must re-invoke the Query API.
 *   - The latest NextToken should be used to paginate until null is returned, at
 *     which point a new NextToken should be used.
 *   - If the IAM principal of the query initiator and the result reader are not
 *     the same and/or the query initiator and the result reader do not have the
 *     same query string in the query requests, the query will fail with an
 *     "Invalid pagination token" error.
 * The total number of rows to be returned in the Query output. The initial run of
 * Query with a MaxRows value specified will return the result set of the query in
 * two cases:
 *   - The size of the result is less than 1MB.
 *   - The number of rows in the result set is less than the value of maxRows.
 * Otherwise, the initial invocation of Query only returns a NextToken, which can
 * then be used in subsequent calls to fetch the result set. To resume pagination,
 * provide the NextToken value in the subsequent command.
 * If the row size is large (e.g. a row has many columns), Timestream may return
 * fewer rows to keep the response size from exceeding the 1 MB limit. If MaxRows
 * is not provided, Timestream will send the necessary number of rows to meet the
 * 1 MB limit.
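
Putting MaxRows and NextToken together, a pagination loop might look like the
following sketch (table and database names are hypothetical):

```ts
import { TimestreamQueryClient, QueryCommand } from "@aws-sdk/client-timestream-query";

const client = new TimestreamQueryClient({ region: "us-east-1" });

async function runPaginatedQuery(queryString: string): Promise<void> {
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new QueryCommand({ QueryString: queryString, MaxRows: 100, NextToken: nextToken })
    );
    for (const row of page.Rows ?? []) {
      console.log(JSON.stringify(row.Data));
    }
    // Always paginate with the most recent token; older tokens are invalidated.
    nextToken = page.NextToken;
  } while (nextToken);
}

await runPaginatedQuery('SELECT * FROM "exampleDb"."host_metrics" LIMIT 500');
```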
 * Starts a bidirectional HTTP/2 or WebSocket stream where audio is streamed to
 * Amazon Transcribe Medical and the transcription results are streamed to your
 * application.
 * The following parameters are required:
 *   - language-code
 *   - media-encoding
 *   - sample-rate
 * For more information on streaming with Amazon Transcribe Medical, see
 * Transcribing streaming audio.
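
A sketch of starting a medical stream with the three required parameters plus the
medical-specific Specialty and Type fields (getAudioChunks() is a hypothetical
source of raw PCM buffers):

```ts
import {
  TranscribeStreamingClient,
  StartMedicalStreamTranscriptionCommand,
} from "@aws-sdk/client-transcribe-streaming";

declare function getAudioChunks(): AsyncIterable<Uint8Array>; // hypothetical audio source

async function* audioStream() {
  for await (const chunk of getAudioChunks()) {
    yield { AudioEvent: { AudioChunk: chunk } };
  }
}

const client = new TranscribeStreamingClient({ region: "us-east-1" });

const response = await client.send(
  new StartMedicalStreamTranscriptionCommand({
    LanguageCode: "en-US", // Transcribe Medical supports US English only
    MediaEncoding: "pcm",
    MediaSampleRateHertz: 16000,
    Specialty: "PRIMARYCARE",
    Type: "CONVERSATION",
    AudioStream: audioStream(),
  })
);

// Partial segments (IsPartial === true) are refined until the segment completes.
for await (const event of response.TranscriptResultStream ?? []) {
  for (const result of event.TranscriptEvent?.Transcript?.Results ?? []) {
    if (!result.IsPartial) {
      console.log(result.Alternatives?.[0]?.Transcript);
    }
  }
}
```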
 * @example

diff --git a/clients/client-transcribe-streaming/src/endpoint/ruleset.ts b/clients/client-transcribe-streaming/src/endpoint/ruleset.ts
index fa831da484768..9f76ad034aa7d 100644
--- a/clients/client-transcribe-streaming/src/endpoint/ruleset.ts
+++ b/clients/client-transcribe-streaming/src/endpoint/ruleset.ts
@@ -6,26 +6,25 @@ import { RuleSetObject } from "@smithy/types";
    or see "smithy.rules#endpointRuleSet" in codegen/sdk-codegen/aws-models/transcribe-streaming.json
 */
-const r="required",
-s="fn",
-t="argv",
-u="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
 b="tree",
 c="error",
 d="endpoint",
-e="stringEquals",
-f={[r]:false,"type":"String"},
-g={[r]:true,"default":false,"type":"Boolean"},
-h={[u]:"Region"},
-i={[u]:"Endpoint"},
-j={[s]:"booleanEquals",[t]:[{[u]:"UseFIPS"},true]},
-k={[s]:"booleanEquals",[t]:[{[u]:"UseDualStack"},true]},
-l={},
-m={[s]:"booleanEquals",[t]:[true,{[s]:"getAttr",[t]:[{[u]:a},"supportsFIPS"]}]},
-n={[s]:"booleanEquals",[t]:[true,{[s]:"getAttr",[t]:[{[u]:a},"supportsDualStack"]}]},
-o=[i],
-p=[j],
-q=[k];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[s]:"aws.partition",[t]:[h],assign:a}],type:b,rules:[{conditions:[{[s]:"isSet",[t]:o},{[s]:"parseURL",[t]:o,assign:"url"}],type:b,rules:[{conditions:p,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:q,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:i,properties:l,headers:l},type:d}]}]},{conditions:[j,k],type:b,rules:[{conditions:[m,n],type:b,rules:[{endpoint:{url:"https://transcribestreaming-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:p,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{conditions:[{[s]:e,[t]:[h,"transcribestreaming-ca-central-1"]}],endpoint:{url:"https://transcribestreaming-fips.ca-central-1.amazonaws.com",properties:l,headers:l},type:d},{conditions:[{[s]:e,[t]:[h,"transcribestreaming-us-east-1"]}],endpoint:{url:"https://transcribestreaming-fips.us-east-1.amazonaws.com",properties:l,headers:l},type:d},{conditions:[{[s]:e,[t]:[h,"transcribestreaming-us-east-2"]}],endpoint:{url:"https://transcribestreaming-fips.us-east-2.amazonaws.com",properties:l,headers:l},type:d},{conditions:[{[s]:e,[t]:[h,"transcribestreaming-us-west-2"]}],endpoint:{url:"https://transcribestreaming-fips.us-west-2.amazonaws.com",properties:l,headers:l},type:d},{endpoint:{url:"https://transcribestreaming-fips.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:q,type:b,rules:[{conditions:[n],type:b,rules:[{endpoint:{url:"https://transcribestreaming.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:l,headers:l},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://transcribestreaming.{Region}.{PartitionResult#dnsSuffix}",properties:l,headers:l},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://transcribestreaming-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://transcribestreaming-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://transcribestreaming.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://transcribestreaming.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
 export const ruleSet: RuleSetObject = _data;

diff --git a/clients/client-transcribe-streaming/src/models/models_0.ts b/clients/client-transcribe-streaming/src/models/models_0.ts
index ca039b3065317..1c4a9a7e4c377 100644
--- a/clients/client-transcribe-streaming/src/models/models_0.ts
+++ b/clients/client-transcribe-streaming/src/models/models_0.ts
@@ -1100,7 +1100,7 @@ export interface MedicalEntity {
   /**
    * @public
    * The confidence score associated with the identified PHI entity in your audio.
-   * Confidence scores are values between 0 and 1. A larger value indicates a higher
+   * Confidence scores are values between 0 and 1. A larger value indicates a higher
    * probability that the identified entity correctly matches the entity spoken in your
    * media.
 */
@@ -1141,7 +1141,7 @@ export interface MedicalItem {
   /**
    * @public
    * The confidence score associated with a word or phrase in your transcript.
-   * Confidence scores are values between 0 and 1. A larger value indicates a higher
+   * Confidence scores are values between 0 and 1. A larger value indicates a higher
    * probability that the identified item correctly matches the item spoken in your
    * media.
 */
@@ -1200,7 +1200,7 @@ export type MedicalContentIdentificationType =
   /**
    * @public
    * The Result associated with a
    * .
-   * Contains a set of transcription results from one or more audio segments, along with
+   * Contains a set of transcription results from one or more audio segments, along with
    * additional information per your request parameters. This can include information relating to
    * alternative transcriptions, channel identification, partial result stabilization, language
    * identification, and other transcription-related data.
@@ -1227,7 +1227,7 @@ export interface MedicalResult {
   /**
    * @public
    * Indicates if the segment is complete.
-   * If IsPartial is true, the segment is not complete. If
+   * If IsPartial is true, the segment is not complete. If
    * IsPartial is false, the segment is complete.
    * The MedicalTranscript associated with a
    * .
+   *
    * MedicalTranscript contains Results, which contains a set of
    * transcription results from one or more audio segments, along with additional information per your
    * request parameters.
    * The MedicalTranscriptEvent associated with a
    * MedicalTranscriptResultStream.
-   * Contains a set of transcription results from one or more audio segments, along with additional
+   * Contains a set of transcription results from one or more audio segments, along with additional
    * information per your request parameters.
 */
 export interface MedicalTranscriptEvent {
@@ -1306,7 +1306,7 @@ export namespace MedicalTranscriptResultStream {
   /**
    * @public
    * The MedicalTranscriptEvent associated with a
    * MedicalTranscriptResultStream.
-   * Contains a set of transcription results from one or more audio segments, along with
+   * Contains a set of transcription results from one or more audio segments, along with
    * additional information per your request parameters. This can include information relating to
    * alternative transcriptions, channel identification, partial result stabilization, language
    * identification, and other transcription-related data.
@@ -1823,9 +1823,9 @@ export interface StartMedicalStreamTranscriptionRequest {
   /**
    * @public
    * Specify the language code that represents the language spoken in your audio.
-   * Amazon Transcribe Medical only supports US English (en-US).
+   * Amazon Transcribe Medical only supports US English (en-US).
    * Specify the encoding used for the input audio. Supported formats are:
-   * FLAC
+   * FLAC
-   * OPUS-encoded audio in an Ogg container
+   * OPUS-encoded audio in an Ogg container
-   * PCM (only signed 16-bit little-endian audio formats, which does not include
+   * PCM (only signed 16-bit little-endian audio formats, which does not include
    * WAV)
-   * For more information, see Media formats.
+   * For more information, see Media formats.
 */
  MediaEncoding: MediaEncoding | string | undefined;
@@ -1881,7 +1881,7 @@ export interface StartMedicalStreamTranscriptionRequest {
   /**
    * @public
    * Enables speaker partitioning (diarization) in your transcription output. Speaker
    * partitioning labels the speech from individual speakers in your media file.
-   * For more information, see Partitioning speakers (diarization).
+   * For more information, see Partitioning speakers (diarization).
 */
  ShowSpeakerLabel?: boolean;
@@ -1890,7 +1890,7 @@ export interface StartMedicalStreamTranscriptionRequest {
   /**
    * @public
    * Specify a name for your transcription session. If you don't include this parameter in
    * your request, Amazon Transcribe Medical generates an ID and returns it in the
    * response.
-   * You can use a session ID to retry a streaming session.
+   * You can use a session ID to retry a streaming session.
 */
  SessionId?: string;
@@ -1905,11 +1905,11 @@ export interface StartMedicalStreamTranscriptionRequest {
   /**
    * @public
    * Enables channel identification in multi-channel audio.
-   * Channel identification transcribes the audio on each channel independently, then appends
+   * Channel identification transcribes the audio on each channel independently, then appends
    * the output for each channel into one transcript.
-   * If you have multi-channel audio and do not enable channel identification, your audio is
+   * If you have multi-channel audio and do not enable channel identification, your audio is
    * transcribed in a continuous manner and your transcript is not separated by channel.
-   * For more information, see Transcribing multi-channel audio.
+   * For more information, see Transcribing multi-channel audio.
 */
  EnableChannelIdentification?: boolean;
@@ -1923,9 +1923,9 @@ export interface StartMedicalStreamTranscriptionRequest {
   /**
    * @public
    * Labels all personal health information (PHI) identified in your transcript.
-   * Content identification is performed at the segment level; PHI is flagged upon complete
+   * Content identification is performed at the segment level; PHI is flagged upon complete
    * transcription of an audio segment.
-   * For more information, see Identifying personal health information (PHI) in a
+   * For more information, see Identifying personal health information (PHI) in a
    * transcription.
 * This example describes how to update an in-transit email message. For more information and examples for using this API, see
*
* Updating message content with AWS Lambda. Updates to an in-transit message only appear when you call The Amplify UI Builder API provides a programmatic interface for creating\n and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's\n backend Amazon Web Services resources. You can also use the Amplify Studio visual designer to create UI components\n and model data for an app. For more information, see Introduction in the\n Amplify Docs. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and\n documentation for client app development. For more information, see the Amplify Framework. For more information about\n deploying an Amplify application to Amazon Web Services, see the Amplify User Guide. Creates a new component for an Amplify app. Creates a new form for an Amplify app. Creates a theme to apply to the components in an Amplify app. Deletes a component from an Amplify app. Deletes a form from an Amplify app. Deletes a theme from an Amplify app. Returns an existing code generation job. Returns an existing component for an Amplify app. Returns an existing form for an Amplify app. Returns an existing theme for an Amplify app. Retrieves a list of code generation jobs for a specified Amplify app and backend environment. Retrieves a list of components for a specified Amplify app and backend\n environment. Retrieves a list of forms for a specified Amplify app and backend environment. Retrieves a list of themes for a specified Amplify app and backend\n environment. Starts a code generation job for a specified Amplify app and backend environment. Updates an existing component. Updates an existing form. Updates an existing theme. Optional metadata that you can apply to the service mesh to assist with categorization\n and organization. Each tag consists of a key and an optional value, both of which you\n define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters. Optional metadata that you can apply to the route to assist with categorization and\n organization. Each tag consists of a key and an optional value, both of which you define.\n Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters. Optional metadata that you can apply to the virtual node to assist with categorization\n and organization. Each tag consists of a key and an optional value, both of which you\n define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters. Optional metadata that you can apply to the virtual router to assist with categorization\n and organization. Each tag consists of a key and an optional value, both of which you\n define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters. Optional metadata that you can apply to the virtual service to assist with\n categorization and organization. Each tag consists of a key and an optional value, both of\n which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have\n a maximum length of 256 characters. Amazon Web Services Billing Conductor is a fully managed service that you can use\n to customize a pro forma version of your billing data each month, to accurately show or chargeback\n your end customers. 
Amazon Web Services Billing Conductor doesn't change the way\n you're billed by Amazon Web Services each month by design. Instead, it provides you with a\n mechanism to configure, generate, and display rates to certain customers over a given billing\n period. You can also analyze the difference between the rates you apply to your accounting\n groupings relative to your actual rates from Amazon Web Services. As a result of your Amazon Web Services Billing Conductor configuration, the payer account can also see the\n custom rate applied on the billing details page of the Amazon Web Services Billing console, or configure a cost and usage report per\n billing group. This documentation shows how you can configure Amazon Web Services Billing Conductor using its\n API. For more information about using the Amazon Web Services\n Billing Conductor user interface, see the Amazon Web Services Billing Conductor User Guide. \n Creates a billing group that resembles a consolidated billing family that Amazon Web Services charges, based off of the predefined pricing plan computation.\n \n Creates a custom line item that can be used to create a one-time fixed charge that can be applied to a single billing group for the current or previous billing period. The one-time fixed charge is either a fee or discount. \n Creates a pricing plan that is used for computing Amazon Web Services charges for billing groups.\n \n Creates a pricing rule can be associated to a pricing plan, or a set of pricing plans.\n \n A list the tags for a resource.\n Associates the specified tags to a resource with the specified \n Deletes specified tags from a resource.\n Welcome to the Clean Rooms API Reference. Clean Rooms is an Amazon Web Services service that helps multiple parties to join\n their data together in a secure collaboration workspace. In the collaboration, members who\n can query and receive results can get insights into the collective datasets without either\n party getting access to the other party's raw data. To learn more about Clean Rooms concepts, procedures, and best practices, see the\n Clean Rooms User Guide. To learn more about SQL commands, functions, and conditions supported in Clean Rooms, see the\n Clean Rooms SQL Reference. Creates a configured table association. A configured table association links a\n configured table with a collaboration. Tags a resource. Removes a tag or list of tags from a resource. Updates a configured table association. Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the\n change set. For more information, see Updating Stacks Using Change\n Sets in the CloudFormation User Guide. \n This section provides documentation for the Amazon CodeGuru Profiler API operations.\n \n Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides\n recommendations that can help you fine-tune your application performance. Using machine learning\n algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can\n improve efficiency and remove CPU bottlenecks.\n \n Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is\n running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization.\n Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM)\n languages and Python. 
While CodeGuru Profiler supports both visualizations and recommendations for applications\n written in Java, it can also generate visualizations and a subset of recommendations for\n applications written in other JVM languages and Python. \n For more information, see What is Amazon CodeGuru Profiler in \n the Amazon CodeGuru Profiler User Guide.\n These interfaces allow you to apply the AWS library of pre-defined controls to your\norganizational units, programmatically. In this context, controls are the same as AWS Control Tower guardrails. To call these APIs, you'll need to know: the and the ARN associated with the target organizational unit (OU). \n To get the The \n ARN format: \n \n Example:\n \n \n To get the ARN for an OU:\n In the AWS Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU. \n OU ARN format:\n \n \n Details and examples\n \n Creating AWS Control Tower resources with AWS CloudFormation\n To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n \n Recording API Requests\n AWS Control Tower supports AWS CloudTrail, a service that records AWS API calls for your AWS account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the AWS Control Tower service received, who made the request and when, and so on. For more about AWS Control Tower and its support for CloudTrail, see Logging AWS Control Tower Actions with AWS CloudTrail in the AWS Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the AWS CloudTrail User Guide. The new Amazon Elastic DocumentDB service endpoint. Create a new Source Network resource for a provided VPC ID. Lists all Recovery Instances or multiple Recovery Instances by ID. AWS Elastic Disaster Recovery Service. Export the Source Network CloudFormation template to an S3 bucket. Writes a block of data to a snapshot. If the specified block contains\n data, the existing data is overwritten. The target snapshot must be in the\n Data written to a snapshot must be aligned with 512-KiB sectors. You should always retry requests that receive server ( Amazon EMR Serverless is a new deployment option for Amazon EMR. Amazon EMR Serverless provides a serverless runtime environment that simplifies running\n analytics applications using the latest open source frameworks such as Apache Spark and\n Apache Hive. With Amazon EMR Serverless, you don’t have to configure, optimize,\n secure, or operate clusters to run applications with these frameworks. The API reference to Amazon EMR Serverless is It is the prefix in the CLI commands for Amazon EMR Serverless. For\n example, It is the prefix before IAM policy actions for Amazon EMR Serverless. For\n example, It is the prefix used in Amazon EMR Serverless service endpoints. For\n example, Welcome to the AWS Entity Resolution API Reference. AWS Entity Resolution is an AWS service that provides pre-configured entity resolution capabilities \n that enable developers and analysts at advertising and marketing companies to build an accurate and \n complete view of their consumers. \n With AWS Entity Resolution, you have the ability to match source records containing consumer identifiers, \n such as name, email address, and phone number. This holds true even when these records have incomplete or \n conflicting identifiers. 
For example, AWS Entity Resolution can effectively match a source record from a \n customer relationship management (CRM) system, which includes account information like first name, last name, \n postal address, phone number, and email address, with a source record from a marketing system containing \n campaign information, such as username and email address. To learn more about AWS Entity Resolution concepts, procedures, and best practices, see the\n AWS Entity Resolution \n User Guide. Retrieves information about the specified managed policy, including the policy's\n default version and the total number of IAM users, groups, and roles to which the\n policy is attached. To retrieve the list of the specific users, groups, and roles that\n the policy is attached to, use ListEntitiesForPolicy. This operation\n returns metadata about the policy. To retrieve the actual policy document for a specific\n version of the policy, use GetPolicyVersion. This operation retrieves information about managed policies. To retrieve information\n about an inline policy that is embedded with an IAM user, group, or role, use GetUserPolicy, GetGroupPolicy, or GetRolePolicy. For more information about policies, see Managed policies and inline\n policies in the IAM User Guide. Amazon Inspector is a vulnerability discovery service that automates continuous scanning for\n security vulnerabilities within your Amazon EC2 and Amazon ECR environments. Creates a monitor in Amazon CloudWatch Internet Monitor. A monitor is built based on information from the application resources that you add: VPCs,\n\t\t\tNetwork Load Balancers (NLBs), Amazon CloudFront distributions, and Amazon WorkSpaces directories. Internet Monitor then publishes internet measurements from Amazon Web Services \n\t\t\tthat are specific to the city-networks. That is, the locations and ASNs (typically internet service providers or ISPs),\n\t\t\twhere clients access your application. For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User \n\t\t\t\t\tGuide. When you create a monitor, you choose the percentage of traffic that you want to monitor. You can also set a maximum limit for the \n\t\t\tnumber of city-networks where client traffic is monitored, that caps the total traffic that Internet Monitor monitors. A city-network \n\t\t\tmaximum is the limit of city-networks, but you only pay for the number of city-networks that are actually monitored. You can update your monitor\n\t\t\tat any time to change the percentage of traffic to monitor or the city-networks maximum. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide. Amazon CloudWatch Internet Monitor provides visibility into how internet issues impact the performance and availability \n\t\t\tbetween your applications hosted on Amazon Web Services and your end users. It can reduce the time it takes for you to diagnose \n\t\t\tinternet issues from days to minutes. Internet Monitor uses the connectivity data that Amazon Web Services captures from its global \n\t\t\tnetworking footprint to calculate a baseline of performance and availability for internet traffic. This \n\t\t\tis the same data that Amazon Web Services uses to monitor internet uptime and availability. With those measurements \n\t\t\tas a baseline, Internet Monitor raises awareness for you when there are significant problems for your \n\t\t\tend users in the different geographic locations where your application runs. 
Internet Monitor publishes internet measurements to CloudWatch Logs and CloudWatch Metrics, \n\t\t\tto easily support using CloudWatch tools with health information for geographies and networks specific to your application.\n\t\t\tInternet Monitor sends health events to Amazon EventBridge so that you can set up notifications. If an issue is caused by the Amazon Web Services network, \n\t\t\tyou also automatically receive an Amazon Web Services Health Dashboard notification with the steps that Amazon Web Services is taking to mitigate the problem. To use Internet Monitor, you create a monitor and associate your application's resources \n\t\t\twith it - VPCs, NLBs, CloudFront distributions, or WorkSpaces directories - so Internet Monitor can determine \n\t\t\twhere your application's internet traffic is. Internet Monitor then provides internet measurements from Amazon Web Services that are specific to \n\t\t\tthe locations and ASNs (typically, internet service providers or ISPs) that communicate with your application. For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide. Adds a tag to a resource. Tags are supported only for monitors in Amazon CloudWatch Internet Monitor. You can add a maximum of 50 tags in Internet Monitor. A minimum of one tag is required for this call. It returns an error if you use the Removes a tag from a resource. Amazon Kinesis Data Streams is a managed service that scales elastically for real-time\n processing of streaming big data. Sends user input (text or speech) to Amazon Lex. Clients use this API to\n send text and audio requests to Amazon Lex at runtime. Amazon Lex interprets the\n user input using the machine learning model that it built for the bot. The In response, Amazon Lex returns the next message to convey to the user.\n Consider the following example messages: For a user input \"I would like a pizza,\" Amazon Lex might return a\n response with a message eliciting slot data (for example,\n After the user provides all of the pizza order information, Amazon Lex\n might return a response with a message to get user confirmation:\n \"Order the pizza?\". After the user replies \"Yes\" to the confirmation prompt, Amazon Lex\n might return a conclusion statement: \"Thank you, your cheese pizza has\n been ordered.\". Not all Amazon Lex messages require a response from the user. For example,\n conclusion statements do not require a response. Some messages require\n only a yes or no response. In addition to the If the message is to elicit slot data, Amazon Lex returns the\n following context information: \n \n \n \n If the message is a confirmation prompt, the\n If the message is a clarification prompt configured for the\n intent, indicating that the user intent is not understood, the\n In addition, Amazon Lex also returns your application-specific\n This section contains documentation for the Amazon Lex V2 Runtime V2 API operations. Sends user input to Amazon Lex V2. You can send text or speech. Clients use\n this API to send text and audio requests to Amazon Lex V2 at runtime. Amazon Lex V2\n interprets the user input using the machine learning model built for\n the bot. The following request fields must be compressed with gzip and then\n base64 encoded before you send them to Amazon Lex V2. requestAttributes sessionState The following response fields are compressed using gzip and then\n base64 encoded by Amazon Lex V2. Before you can use these fields, you must\n decode and decompress them. 
inputTranscript interpretations messages requestAttributes sessionState The example contains a Java application that compresses and encodes\n a Java object to send to Amazon Lex V2, and a second that decodes and\n decompresses a response from Amazon Lex V2. If the optional post-fulfillment response is specified, the messages\n are returned as follows. For more information, see PostFulfillmentStatusSpecification. \n Success message - Returned if\n the Lambda function completes successfully and the intent state is\n fulfilled or ready fulfillment if the message is present. \n Failed message - The failed\n message is returned if the Lambda function throws an exception or\n if the Lambda function returns a failed intent state without a\n message. \n Timeout message - If you\n don't configure a timeout message and a timeout, and the Lambda\n function doesn't return within 30 seconds, the timeout message is\n returned. If you configure a timeout, the timeout message is\n returned when the period times out. For more information, see Completion message. With License Manager, you can discover and track your commercial Linux subscriptions on running\n Amazon EC2 instances. With License Manager, you can create user-based subscriptions to utilize licensed software with\n a per user subscription fee on Amazon EC2 instances. Creates a new application with given parameters. Requires an existing runtime\n environment and application definition file. Creates and starts a deployment to deploy an application into a runtime\n environment. Deletes a specific application. You cannot delete a running application. Deletes a specific runtime environment. The environment cannot contain deployed\n applications. If it does, you must delete those applications before you delete the\n environment. Adds one or more tags to the specified resource. Removes one or more tags from the specified resource. Updates an application and creates a new version. Create a channel to start receiving content streams. The channel represents the input to MediaPackage for incoming live content from an encoder such as AWS Elemental MediaLive. The channel receives content, and after packaging it, outputs it through an origin endpoint to downstream devices (such as video players or CDNs) that request the content. You can create only one channel with each request. We recommend that you spread out channels between channel groups, such as putting redundant channels in the same AWS Region in different channel groups. Create a channel group to group your channels and origin endpoints. A channel group is the top-level resource that consists of channels and origin endpoints that are associated with it and that provides predictable URLs for stream delivery. All channels and origin endpoints within the channel group are guaranteed to share the DNS. You can create only one channel group with each request. The endpoint is attached to a channel, and represents the output of the live content. You can associate multiple endpoints to a single channel. Each endpoint gives players and downstream CDNs (such as Amazon CloudFront) access to the content for playback. Content can't be served from a channel until it has an endpoint. You can create only one endpoint with each request. Assigns one of more tags (key-value pairs) to the specified MediaPackage resource. Tags can help you organize and categorize your resources. 
You can also use them to scope user \n permissions, by granting a user permission to access or change only resources with certain tag values.\n You can use the TagResource operation with a resource that already has tags. If you specify a new tag \n key for the resource, this tag is appended to the list of tags associated with the resource. If you \n specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. Removes one or more tags from the specified resource. Uploads an object to the specified path. Object sizes are limited to 25 MB for standard upload availability and 10 MB for streaming upload availability. Create a data store. Adds a user-specifed key and value tag to a medical imaging resource. Removes tags from a medical imaging resource. The Application Migration Service service. Creates a streaming session stream for a streaming session. After invoking this API, invoke GetStreamingSessionStream with the returned streamId\n to poll the resource until it is in the Create a new studio. When creating a studio, two IAM roles must be provided: the admin role\n and the user role. These roles are assumed by your users when they log in to the Nimble Studio portal. The user role must have the The admin role must have the You may optionally specify a KMS key in the\n In Nimble Studio, resource names, descriptions, initialization scripts, and other\n data you provide are always encrypted at rest using an KMS key. By default, this key is\n owned by Amazon Web Services and managed on your behalf. You may provide your own KMS key\n when calling When providing an KMS key during studio creation, Nimble Studio creates KMS\n grants in your account to provide your studio user and admin roles access to these KMS\n keys. If you delete this grant, the studio will no longer be accessible to your portal\n users. If you delete the studio KMS key, your studio will no longer be accessible. Deletes streaming session resource. After invoking this operation, use GetStreamingSession to poll the resource until it\n transitions to a A streaming session will count against your streaming session quota until it is marked\n Delete a studio resource. Get a launch profile initialization. Gets StreamingSession resource. Invoke this operation to poll for a streaming session state while creating or deleting\n a session. Gets Invoke this operation to poll for a streaming session backup while stopping a\n streaming session. Gets a StreamingSessionStream for a streaming session. Invoke this operation to poll the resource after invoking\n After the List all the launch profiles a studio. Lists the backups of a streaming session in a studio. Lists the streaming sessions in a studio. Add/update users with given persona to launch profile membership. Add/update users with given persona to studio membership. Transitions sessions from the Repairs the IAM Identity Center configuration for a given studio. If the studio has a valid IAM Identity Center configuration currently associated with\n it, this operation will fail with a validation error. If the studio does not have a valid IAM Identity Center configuration currently\n associated with it, then a new IAM Identity Center application is created for the studio\n and the studio is changed to the After the IAM Identity Center application is repaired, you must use the Amazon Nimble Studio console to add administrators and users to your studio. Transitions sessions from the Update a Studio resource. 
Currently, this operation only supports updating the displayName of your\n studio. Welcome to the Amazon Nimble Studio API reference. This API reference provides\n methods, schema, resources, parameters, and more to help you get the most out of Nimble\n Studio. Nimble Studio is a virtual studio that empowers visual effects, animation, and\n interactive content teams to create content securely within a scalable, private cloud\n service. Creates a link between a source account and a sink that you have created in a monitoring account. Before you create a link, you must create a sink in the monitoring account and create a\n sink policy in that account. The sink policy must permit the source account to link to it. You\n can grant permission to source accounts by granting permission to an entire organization or to\n individual accounts. For more information, see\n CreateSink and\n PutSinkPolicy. Each monitoring account can be linked to as many as 100,000 source accounts. Each source account can be linked to as many as five monitoring accounts. Use this to create a sink in the current account, so that it can be\n used as a monitoring account in CloudWatch cross-account observability. A sink is a resource that\n represents an attachment point in a monitoring account. Source accounts can link to the sink\n to send observability data. After you create a sink, you must create a sink policy that allows source accounts to attach to it.\n For more information, see PutSinkPolicy. Each account can contain one sink. If you delete a sink, you can then create a new one in that account. Deletes a link between a monitoring account sink and a source account. You must run this operation\n in the source account. Deletes a sink. You must delete all links to a sink before you can delete that sink. Returns complete information about one link. To use this operation, provide the link ARN. To retrieve a list of link ARNs, use ListLinks. Returns complete information about one monitoring account sink. To use this operation, provide the sink ARN. To retrieve a list of sink ARNs, use ListSinks. Returns the current sink policy attached to this sink. The sink policy specifies what\n accounts can attach to this sink as source accounts, and what types of data they can share. Returns a list of source account links that are linked to this monitoring account sink. To use this operation, provide the sink ARN. To retrieve a list of sink ARNs, use ListSinks. To find a list of links for one source account, use ListLinks. Creates or updates the resource policy that grants permissions to source\n accounts to link to the monitoring account sink. When you create a sink policy, you can grant\n permissions to all accounts in an organization or to individual accounts. You can also use a sink policy to limit the types of data that is shared. The three types that\n you can allow or deny are: \n Metrics - Specify with\n \n Log groups - Specify with \n Traces - Specify with See the examples in this section to see how to specify permitted source accounts and data types. Assigns one or more tags (key-value pairs) to the specified resource.\n Both sinks and links can be tagged. Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user\n permission to access or change only resources with certain tag values. Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters. 
You can use the You can associate as many as 50 tags with a resource. Unlike tagging permissions in other Amazon Web Services services, to tag or untag links and\n sinks you must have the Removes one or more tags from the specified resource. Unlike tagging permissions in other Amazon Web Services services, to tag or untag links and\n sinks you must have the Use this operation to change what types of data are shared from a source account to its linked\n monitoring account sink. You can't change the sink or change the monitoring account with this operation. To update the list of tags associated with the sink, use\n TagResource. Use Amazon CloudWatch Observability Access Manager to create and manage links between source accounts and\n monitoring accounts by using CloudWatch cross-account observability. With\n CloudWatch cross-account observability, you can monitor and troubleshoot applications that span\n multiple accounts within a Region. Seamlessly search, visualize, and analyze your metrics,\n logs, and traces in any of the linked accounts without account boundaries. Set up one or more Amazon Web Services accounts as monitoring\n accounts and link them with multiple source accounts. A\n monitoring account is a central Amazon Web Services account that can view and interact with\n observability data generated from source accounts. A source account is an individual Amazon Web Services account that generates observability data for the resources that reside in it.\n Source accounts share their observability data with the monitoring account. The shared\n observability data can include metrics in Amazon CloudWatch, logs in Amazon CloudWatch Logs, and traces in X-Ray. Creates a reference store. Creates a sequence store. This is the AWS HealthOmics API Reference. For an introduction to the service, see What is AWS HealthOmics? in the\n AWS HealthOmics User Guide. Amazon Web Services Private CA Connector for Active Directory creates a connector between Amazon Web Services Private CA and Active Directory (AD) that enables you to\n provision security certificates for AD signed by a private CA that you own. For more\n information, see Amazon Web Services Private CA Connector for Active Directory. Create a pipe. Amazon EventBridge Pipes connect event sources to targets and reduces the need for specialized knowledge and integration code. Delete an existing pipe. For more information about pipes, see Amazon EventBridge Pipes in the Amazon EventBridge User Guide. Get the information about an existing pipe. For more information about pipes, see Amazon EventBridge Pipes in the Amazon EventBridge User Guide. Displays the tags associated with a pipe. Start an existing pipe. Stop an existing pipe. Assigns one or more tags (key-value pairs) to the specified pipe. Tags can\n help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user permission to access or change only resources with certain tag\n values. Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of\n characters. You can use the You can associate as many as 50 tags with a pipe. Removes one or more tags from the specified pipes. Update an existing pipe. When you call For more information about pipes, see \n Amazon EventBridge Pipes in the Amazon EventBridge User Guide. Amazon Web Services Private 5G is a managed service that makes it easy to deploy, operate, and scale\n your own private mobile network at your on-premises location. 
Private 5G provides the\n pre-configured hardware and software for mobile networks, helps automate setup, and\n scales capacity on demand to support additional devices as needed. This is an interface reference for Amazon Redshift Serverless. \n It contains documentation for one of the programming or command line interfaces you can use to manage Amazon Redshift Serverless.\n Amazon Redshift Serverless automatically provisions data warehouse capacity and intelligently scales the \n underlying resources based on workload demands. Amazon Redshift Serverless adjusts capacity in seconds to deliver consistently high \n performance and simplified operations for even the most demanding and volatile workloads. Amazon Redshift Serverless lets you\n focus on using your data to acquire new insights for your business and customers.\n \n To learn more about Amazon Redshift Serverless, \n see What is Amazon Redshift Serverless.\n Retrieves objects from Amazon S3. To use An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object To get an object from such a logical hierarchy, specify the full key name for the object\n in the For more information about returning the ACL of an object, see GetObjectAcl. If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n Encryption request headers, like If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers: \n \n \n For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys). Assuming you have the relevant permission to read object tags, the response also returns\n the You need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions in a\n Policy. If the object that you request doesn’t exist, the error that Amazon S3 returns depends\n on whether you also have the If you have the If you don’t have the By default, the If you supply a If the current version of the object is a delete marker, Amazon S3 behaves as if the\n object was deleted and includes For more information about versioning, see PutBucketVersioning. There are times when you want to override certain response header values in a You can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request. \n \n \n \n \n \n If both of the If both of the For more information about conditional requests, see RFC 7232. 
The following operations are related to \n ListBuckets\n \n GetObjectAcl\n Passes transformed objects to a This operation supports metadata that can be returned by GetObject, in addition to\n You can include any number of metadata headers. When including a metadata header, it\n should be prefaced with Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to\n detect and redact personally identifiable information (PII) and decompress S3 objects.\n These Lambda functions are available in the Amazon Web Services Serverless Application Repository, and\n can be selected through the Amazon Web Services Management Console when you create your Object Lambda access point. Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a\n natural language processing (NLP) service using machine learning to find insights and\n relationships in text. It automatically detects personally identifiable information (PII)\n such as names, addresses, dates, credit card numbers, and social security numbers from\n documents in your Amazon S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural\n language processing (NLP) service using machine learning to find insights and relationships\n in text. It automatically redacts personally identifiable information (PII) such as names,\n addresses, dates, credit card numbers, and social security numbers from documents in your\n Amazon S3 bucket. Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is\n equipped to decompress objects stored in S3 in one of six compressed file formats including\n bzip2, gzip, snappy, zlib, zstandard and ZIP. For information on how to view and use these functions, see Using Amazon Web Services built Lambda\n functions in the Amazon S3 User Guide. Provides APIs for creating and managing SageMaker geospatial resources. Returns the description of an endpoint. Describes a SageMaker image. Describes a version of a SageMaker image. Returns information about a notebook instance. Returns a description of a processing job. Returns information about a training job. Some of the attributes below only appear if the training job successfully starts.\n If the training job fails, Returns information about a transform job. The time at which the schedule was created. The time at which the schedule was last modified. \n Updates the specified schedule. When you call \n Before calling this operation, we recommend that you call the Removes a custom log source from Amazon Security Lake, to stop sending data from the custom\n source to Security Lake. Retrieves the configuration that will be automatically set up for accounts added to the\n organization after the organization has onboarded to Amazon Security Lake. This API does not take\n input parameters. Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to\n automatically centralize security data from cloud, on-premises, and custom sources into a\n data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations\n is an account management service that lets you consolidate multiple Amazon Web Services\n accounts into an organization that you create and centrally manage. With Organizations, you\n can create member accounts and invite existing accounts to join your organization.\n Security Lake helps you analyze security data for a more complete understanding of your\n security posture across the entire organization. 
It can also help you improve the\n protection of your workloads, applications, and data. The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you\n retain ownership over your data. Amazon Security Lake integrates with CloudTrail, a service that provides a record of\n actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls\n from the Security Lake console and code calls to the Security Lake API operations. If you create a\n trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still\n view the most recent events in the CloudTrail console in Event history. Using the\n information collected by CloudTrail you can determine the request that was made to\n Security Lake, the IP address from which the request was made, who made the request, when it\n was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide. Security Lake automates the collection of security-related log and event data from\n integrated Amazon Web Services and third-party services. It also helps you manage\n the lifecycle of data with customizable retention and replication settings. Security Lake\n converts ingested data into Apache Parquet format and a standard open-source schema called\n the Open Cybersecurity Schema Framework (OCSF). Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for \n incident response and security data analytics. Adds or updates one or more tags that are associated with an Amazon Security Lake resource: a subscriber, or the data lake configuration for your \n Amazon Web Services account in a particular Amazon Web Services Region. A tag is a label that you can define and associate with \n Amazon Web Services resources. Each tag consists of a required tag key and an associated tag value. A \n tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a \n descriptor for a tag key. Tags can help you identify, categorize, and manage resources in different ways, such as by owner, environment, or other \n criteria. For more information, see \n Tagging Amazon Security Lake resources in the \n Amazon Security Lake User Guide. Removes one or more tags (keys and values) from an Amazon Security Lake resource: a subscriber, or the data lake configuration for your \n Amazon Web Services account in a particular Amazon Web Services Region. Starts a simulation with the given name. You must choose to start your\n simulation from a schema or from a snapshot.\n For more information about the schema, see the schema reference \n in the SimSpace Weaver User Guide.\n For more information about snapshots, see Snapshots\n in the SimSpace Weaver User Guide. Adds tags to a SimSpace Weaver resource. For more information about tags, see Tagging Amazon Web Services resources in the\n Amazon Web Services General Reference. Removes tags from a SimSpace Weaver resource. For more information about tags, see Tagging Amazon Web Services resources in the\n Amazon Web Services General Reference. The Amazon Web Services Region name to add to the replication set. The Amazon Resource Name (ARN) of the automation process. Before the specified timestamp After the specified timestamp. 
A token that ensures that the operation is called only once with the specified\n details. A token ensuring that the operation is called only once with the specified details. The short format name of the response plan. Can't include spaces. A token that ensures that a client calls the action only once with the specified\n details. The Amazon Resource Name (ARN) of the incident record that the action adds the incident\n to. The time that the event occurred. The type of event. You can create timeline events of type The Amazon Resource Name (ARN) of the incident record you are deleting. The name of the Amazon Web Services Region you're deleting from the replication set. The Amazon Resource Name (ARN) of the replication set you're deleting. The Amazon Resource Name (ARN) of the resource you're deleting the policy from. The ID of the resource policy you're deleting. The Amazon Resource Name (ARN) of the response plan. The Amazon Resource Name (ARN) of the incident that includes the timeline event. The ID of the event to update. You can use Variable dynamic parameters. A parameter value is determined when an incident is\n created. The Amazon Resource Name (ARN) of an Amazon Web Services resource referenced in a\n The ID of a The Amazon Resource Name (ARN) of the incident record. The Amazon Resource Name (ARN) of the replication set you want to retrieve. The Amazon Resource Name (ARN) of the response plan with the attached resource policy.\n The maximum number of resource policies to display for each page of results. The pagination token to continue to the next page of results. The Amazon Resource Name (ARN) of the response plan. The Amazon Resource Name (ARN) of the incident that includes the timeline event. The ID of the event. You can get an event's ID when you create it, or by using\n The principal that started the incident. The service principal that assumed the role specified in The resource that caused the incident to be created. The service that started the incident. This can be manually created from Incident Manager,\n automatically created using an Amazon CloudWatch alarm, or Amazon EventBridge\n event. Used to stop Incident Manager from creating multiple incident records for the same incident.\n The type of related item. The Amazon Resource Name (ARN) of the related item, if the related item is an Amazon\n resource. The maximum number of results per page. The pagination token to continue to the next page of results. The Amazon Resource Name (ARN) of the incident record containing the listed related\n items. The maximum number of related items per page. The pagination token to continue to the next page of results. The maximum number of results per page. The pagination token to continue to the next page of results. The maximum number of response plans per page. The pagination token to continue to the next page of results. The Amazon Resource Name (ARN) of the response plan. The Amazon Resource Name (ARN) of the incident that includes the timeline event. Sort timeline events by the specified key value pair. Sorts the order of timeline events by the value specified in the The maximum number of results per page. The pagination token to continue to the next page of results. The Amazon Resource Name (ARN) of the SNS topic. The Amazon Resource Name (ARN) of the response plan to add the resource policy to. Details of the resource policy. 
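Several of the client-token fields above exist so that a retried call cannot create a second incident (or event) with the same details. The idempotency pattern looks roughly like this with @aws-sdk/client-ssm-incidents; the response plan ARN is illustrative:

import { randomUUID } from "node:crypto";
import { SSMIncidentsClient, StartIncidentCommand } from "@aws-sdk/client-ssm-incidents";

const incidents = new SSMIncidentsClient({});
// Generate the token once and reuse it across retries: the service then starts
// at most one incident for this token + request details.
const clientToken = randomUUID();

await incidents.send(new StartIncidentCommand({
  responsePlanArn: "arn:aws:ssm-incidents::111122223333:response-plan/example-plan", // illustrative
  clientToken,
}));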
Systems Manager Incident Manager is an incident management console designed to help users\n mitigate and recover from incidents affecting their Amazon Web Services-hosted applications. An\n incident is any unplanned interruption or reduction in quality of services. Incident Manager increases incident resolution by notifying responders of impact,\n highlighting relevant troubleshooting data, and providing collaboration tools to get services\n back up and running. To achieve the primary goal of reducing the time-to-resolution of\n critical incidents, Incident Manager automates response plans and enables responder\n team escalation. The Amazon Resource Name (ARN) of the role that the automation document will assume when\n running commands. The automation document's name. The account that the automation document will be run in. This can be in either the\n management account or an application account. A token ensuring that the operation is called only once with the specified details. The Amazon Resource Name (ARN) of the response plan that pre-defines summary, chat\n channels, Amazon SNS topics, runbooks, title, and impact of the incident. The Amazon Resource Name (ARN) of the response plan you're adding the tags to. Identifies the service that sourced the event. All events sourced from within Amazon Web Services begin with \" The Amazon Resource Name (ARN) of the source that detected the incident. The time that the incident was detected. The Amazon Resource Name (ARN) of the response plan you're removing a tag from. The Amazon Resource Name (ARN) of the replication set to update. Specifies if deletion protection is turned on or off in your account. A token that ensures that the operation is called only once with the specified\n details. A token that ensures that a client calls the operation only once with the specified\n details. The Amazon Resource Name (ARN) of the incident record you are updating. The status of the incident. Possible statuses are A token that ensures that a client calls the operation only once with the specified\n details. The Amazon Resource Name (ARN) of the incident record that contains the related items that\n you update. The Amazon Resource Name (ARN) of the replication set you're updating. A token that ensures that the operation is called only once with the specified\n details. A token ensuring that the operation is called only once with the specified details. The Amazon Resource Name (ARN) of the response plan. The string Incident Manager uses to prevent duplicate incidents from being created by the same\n incident in the same account. A token that ensures that a client calls the operation only once with the specified\n details. The Amazon Resource Name (ARN) of the incident that includes the timeline event. The ID of the event to update. You can use The time that the event occurred. The type of event. You can update events of type Amazon Web Services Telco Network Builder (TNB) is a network automation service that helps you deploy and manage telecom networks. AWS TNB helps you with the lifecycle management of your telecommunication network functions throughout planning, deployment, and post-deployment activities. Amazon Transcribe streaming offers three main types of real-time transcription: \n Standard, Medical, and \n Call Analytics. \n Standard transcriptions are the most common option. Refer\n to for details. \n Medical transcriptions are tailored to medical professionals \n and incorporate medical terms. 
A common use case for this service is transcribing doctor-patient \n dialogue in real time, so doctors can focus on their patient instead of taking notes. Refer to\n for details. \n Call Analytics transcriptions are designed for use with call\n center audio on two different channels; if you're looking for insight into customer service calls, use this \n option. Refer to for details. Amazon Verified Permissions is a permissions management service from Amazon Web Services. You can use Verified Permissions to manage\n permissions for your application, and authorize user access based on those permissions.\n Using Verified Permissions, application developers can grant access based on information about the\n users, resources, and requested actions. You can also evaluate additional information\n like group membership, attributes of the resources, and session context, such as time of\n request and IP addresses. Verified Permissions manages these permissions by letting you create and\n store authorization policies for your applications, such as consumer-facing web sites\n and enterprise business systems. Verified Permissions uses Cedar as the policy language to express your permission requirements.\n Cedar supports both role-based access control (RBAC) and attribute-based access\n control (ABAC) authorization models. For more information about configuring, administering, and using Amazon Verified Permissions in your\n applications, see the Amazon Verified Permissions User Guide. For more information about the Cedar policy language, see the Cedar Policy Language Guide. When you write Cedar policies that reference principals, resources and actions,\n you can define the unique identifiers used for each of those elements. We strongly\n recommend that you follow these best practices: \n Use values like universally unique identifiers\n (UUIDs) for all principal and resource identifiers.\n For example, if user Where you use a UUID for an entity, we recommend that you follow it with\n the // comment specifier and the ‘friendly’ name of your entity. This helps\n to make your policies easier to understand. For example: principal ==\n User::\"a1b2c3d4-e5f6-a1b2-c3d4-EXAMPLE11111\", // alice \n Do not include personally identifying, confidential,\n or sensitive information as part of the unique identifier for your\n principals or resources. These identifiers are included in\n log entries shared in CloudTrail trails. Several operations return structures that appear similar, but have different purposes.\n As new functionality is added to the product, the structure used in a parameter of one\n operation might need to change in a way that wouldn't make sense for the same parameter\n in a different operation. To help you understand the purpose of each, the following\n naming convention is used for the structures: Parameter type structures that end in Parameter type structures that end in Parameter type structures that use neither suffix are used in the mutating\n (create and update) operations. A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the Amazon Web Services\n SDK populates this field. For more information about idempotency, see\n Making retries safe with idempotent APIs. A list of tags you want added to the domain. Information about the specified domain. 
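The identifier best practices above (stable UUIDs plus a trailing // friendly-name comment) carry straight into policy creation. A hedged sketch with @aws-sdk/client-verifiedpermissions; the policy store ID, entity types, and UUIDs are illustrative:

import { VerifiedPermissionsClient, CreatePolicyCommand } from "@aws-sdk/client-verifiedpermissions";

const avp = new VerifiedPermissionsClient({});
// The Cedar statement references principal and resource by UUID, keeping the
// human-readable names in // comments as recommended above.
const statement = `permit(
  principal == User::"a1b2c3d4-e5f6-a1b2-c3d4-EXAMPLE11111", // alice
  action == Action::"viewPhoto",
  resource == Photo::"e1f2a3b4-c5d6-e1f2-a3b4-EXAMPLE22222"  // vacation.jpg
);`;

await avp.send(new CreatePolicyCommand({
  policyStoreId: "PSEXAMPLEabcdefg111111", // illustrative
  definition: { static: { statement } },
}));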
Amazon VPC Lattice is a fully managed application networking service that you use to connect, secure,\n and monitor all of your services across multiple accounts and virtual private clouds (VPCs).\n Amazon VPC Lattice interconnects your microservices and legacy services within a logical boundary, so that\n you can discover and manage them more efficiently. For more information, see the Amazon VPC Lattice User Guide\n Base exception class for all service exceptions from RestJsonProtocol service. Base exception class for all service exceptions from RestXmlProtocol service. Base exception class for all service exceptions from RestXmlProtocol service.PutRawMessageContent
from an AWS Lambda function
* configured with a synchronous Run Lambda rule.
diff --git a/clients/client-workmailmessageflow/src/endpoint/ruleset.ts b/clients/client-workmailmessageflow/src/endpoint/ruleset.ts
index 2c518d2fce3ba..e3dbe608b14b1 100644
--- a/clients/client-workmailmessageflow/src/endpoint/ruleset.ts
+++ b/clients/client-workmailmessageflow/src/endpoint/ruleset.ts
@@ -6,24 +6,25 @@ import { RuleSetObject } from "@smithy/types";
or see "smithy.rules#endpointRuleSet"
in codegen/sdk-codegen/aws-models/workmailmessageflow.json */
-const p="required",
-q="fn",
-r="argv",
-s="ref";
-const a="PartitionResult",
+const q="required",
+r="fn",
+s="argv",
+t="ref";
+const a="isSet",
b="tree",
c="error",
d="endpoint",
-e={[p]:false,"type":"String"},
-f={[p]:true,"default":false,"type":"Boolean"},
-g={[s]:"Endpoint"},
-h={[q]:"booleanEquals",[r]:[{[s]:"UseFIPS"},true]},
-i={[q]:"booleanEquals",[r]:[{[s]:"UseDualStack"},true]},
-j={},
-k={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsFIPS"]}]},
-l={[q]:"booleanEquals",[r]:[true,{[q]:"getAttr",[r]:[{[s]:a},"supportsDualStack"]}]},
-m=[g],
-n=[h],
-o=[i];
-const _data={version:"1.0",parameters:{Region:e,UseDualStack:f,UseFIPS:f,Endpoint:e},rules:[{conditions:[{[q]:"aws.partition",[r]:[{[s]:"Region"}],assign:a}],type:b,rules:[{conditions:[{[q]:"isSet",[r]:m},{[q]:"parseURL",[r]:m,assign:"url"}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:g,properties:j,headers:j},type:d}]}]},{conditions:[h,i],type:b,rules:[{conditions:[k,l],type:b,rules:[{endpoint:{url:"https://workmailmessageflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[k],type:b,rules:[{endpoint:{url:"https://workmailmessageflow-fips.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://workmailmessageflow.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:j,headers:j},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://workmailmessageflow.{Region}.{PartitionResult#dnsSuffix}",properties:j,headers:j},type:d}]}]};
+e="PartitionResult",
+f={[q]:false,"type":"String"},
+g={[q]:true,"default":false,"type":"Boolean"},
+h={[t]:"Endpoint"},
+i={[r]:"booleanEquals",[s]:[{[t]:"UseFIPS"},true]},
+j={[r]:"booleanEquals",[s]:[{[t]:"UseDualStack"},true]},
+k={},
+l={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsFIPS"]}]},
+m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"]}]},
+n=[i],
+o=[j],
+p=[{[t]:"Region"}];
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://workmailmessageflow-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://workmailmessageflow-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://workmailmessageflow.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://workmailmessageflow.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
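The reshaped ruleset above hoists the custom-endpoint validations out of a nested tree and adds an explicit isSet(Region) guard, so a missing Region now fails fast with "Invalid Configuration: Missing Region" instead of reaching the partition lookup. A rough illustration of the resulting behavior using resolveEndpoint from @smithy/util-endpoints (the local import of ruleSet is hypothetical; in the SDK this wiring lives in the generated defaultEndpointResolver):

import { resolveEndpoint } from "@smithy/util-endpoints";
import { ruleSet } from "./ruleset"; // hypothetical local import of the object above

// With a Region and no custom Endpoint, the partition branch applies:
const endpoint = resolveEndpoint(ruleSet, {
  endpointParams: { Region: "us-east-1", UseFIPS: false, UseDualStack: false },
});
console.log(endpoint.url.href); // https://workmailmessageflow.us-east-1.amazonaws.com/

// Without a Region, the new top-level rule raises the error directly:
try {
  resolveEndpoint(ruleSet, { endpointParams: { UseFIPS: false, UseDualStack: false } });
} catch (e) {
  console.log((e as Error).message); // Invalid Configuration: Missing Region
}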
diff --git a/clients/client-workspaces/src/endpoint/ruleset.ts b/clients/client-workspaces/src/endpoint/ruleset.ts
index e324fa3d91c39..c60c114c6271e 100644
--- a/clients/client-workspaces/src/endpoint/ruleset.ts
+++ b/clients/client-workspaces/src/endpoint/ruleset.ts
@@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"
n=[i],
o=[j],
p=[{[t]:"Region"}];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://workspaces-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://workspaces-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://workspaces.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://workspaces.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]};
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://workspaces-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://workspaces-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://workspaces.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://workspaces.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/clients/client-xray/src/endpoint/ruleset.ts b/clients/client-xray/src/endpoint/ruleset.ts
index 939dfd01c9ab1..04952f4677560 100644
--- a/clients/client-xray/src/endpoint/ruleset.ts
+++ b/clients/client-xray/src/endpoint/ruleset.ts
@@ -26,5 +26,5 @@ m={[r]:"booleanEquals",[s]:[true,{[r]:"getAttr",[s]:[{[t]:e},"supportsDualStack"
n=[i],
o=[j],
p=[{[t]:"Region"}];
-const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{type:b,rules:[{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]}]},{type:b,rules:[{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://xray-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://xray-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{type:b,rules:[{endpoint:{url:"https://xray.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{type:b,rules:[{endpoint:{url:"https://xray.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]}]},{error:"Invalid Configuration: Missing Region",type:c}]}]};
+const _data={version:"1.0",parameters:{Region:f,UseDualStack:g,UseFIPS:g,Endpoint:f},rules:[{conditions:[{[r]:a,[s]:[h]}],type:b,rules:[{conditions:n,error:"Invalid Configuration: FIPS and custom endpoint are not supported",type:c},{conditions:o,error:"Invalid Configuration: Dualstack and custom endpoint are not supported",type:c},{endpoint:{url:h,properties:k,headers:k},type:d}]},{conditions:[{[r]:a,[s]:p}],type:b,rules:[{conditions:[{[r]:"aws.partition",[s]:p,assign:e}],type:b,rules:[{conditions:[i,j],type:b,rules:[{conditions:[l,m],type:b,rules:[{endpoint:{url:"https://xray-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS and DualStack are enabled, but this partition does not support one or both",type:c}]},{conditions:n,type:b,rules:[{conditions:[l],type:b,rules:[{endpoint:{url:"https://xray-fips.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]},{error:"FIPS is enabled but this partition does not support FIPS",type:c}]},{conditions:o,type:b,rules:[{conditions:[m],type:b,rules:[{endpoint:{url:"https://xray.{Region}.{PartitionResult#dualStackDnsSuffix}",properties:k,headers:k},type:d}]},{error:"DualStack is enabled but this partition does not support DualStack",type:c}]},{endpoint:{url:"https://xray.{Region}.{PartitionResult#dnsSuffix}",properties:k,headers:k},type:d}]}]},{error:"Invalid Configuration: Missing Region",type:c}]};
export const ruleSet: RuleSetObject = _data;
diff --git a/codegen/sdk-codegen/aws-models/accessanalyzer.json b/codegen/sdk-codegen/aws-models/accessanalyzer.json
index e2dbe449b9ea1..bbefb348e6e1e 100644
--- a/codegen/sdk-codegen/aws-models/accessanalyzer.json
+++ b/codegen/sdk-codegen/aws-models/accessanalyzer.json
@@ -3840,7 +3840,9 @@
"items": "findings"
},
"smithy.api#readonly": {},
- "smithy.api#suppress": ["HttpMethodSemantics"]
+ "smithy.api#suppress": [
+ "HttpMethodSemantics"
+ ]
}
},
"com.amazonaws.accessanalyzer#ListAccessPreviewFindingsRequest": {
@@ -4018,7 +4020,9 @@
"items": "analyzedResources"
},
"smithy.api#readonly": {},
- "smithy.api#suppress": ["HttpMethodSemantics"]
+ "smithy.api#suppress": [
+ "HttpMethodSemantics"
+ ]
}
},
"com.amazonaws.accessanalyzer#ListAnalyzedResourcesRequest": {
@@ -4282,7 +4286,9 @@
"items": "findings"
},
"smithy.api#readonly": {},
- "smithy.api#suppress": ["HttpMethodSemantics"]
+ "smithy.api#suppress": [
+ "HttpMethodSemantics"
+ ]
}
},
"com.amazonaws.accessanalyzer#ListFindingsRequest": {
@@ -5906,7 +5912,9 @@
"items": "findings"
},
"smithy.api#readonly": {},
- "smithy.api#suppress": ["HttpMethodSemantics"]
+ "smithy.api#suppress": [
+ "HttpMethodSemantics"
+ ]
}
},
"com.amazonaws.accessanalyzer#ValidatePolicyFinding": {
@@ -6185,4 +6193,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/account.json b/codegen/sdk-codegen/aws-models/account.json
index b0783d857a828..75db6ae1e1cbd 100644
--- a/codegen/sdk-codegen/aws-models/account.json
+++ b/codegen/sdk-codegen/aws-models/account.json
@@ -48,7 +48,9 @@
"outputToken": "NextToken",
"pageSize": "MaxResults"
},
- "smithy.api#suppress": ["EventSource"],
+ "smithy.api#suppress": [
+ "EventSource"
+ ],
"smithy.api#title": "AWS Account",
"smithy.rules#endpointRuleSet": {
"version": "1.0",
@@ -1326,7 +1328,9 @@
"code": 200
},
"smithy.api#readonly": {},
- "smithy.api#suppress": ["HttpMethodSemantics"]
+ "smithy.api#suppress": [
+ "HttpMethodSemantics"
+ ]
}
},
"com.amazonaws.account#GetAlternateContactRequest": {
@@ -1397,7 +1401,9 @@
"code": 200
},
"smithy.api#readonly": {},
- "smithy.api#suppress": ["HttpMethodSemantics"]
+ "smithy.api#suppress": [
+ "HttpMethodSemantics"
+ ]
}
},
"com.amazonaws.account#GetContactInformationRequest": {
@@ -1458,7 +1464,9 @@
"code": 200
},
"smithy.api#readonly": {},
- "smithy.api#suppress": ["HttpMethodSemantics"]
+ "smithy.api#suppress": [
+ "HttpMethodSemantics"
+ ]
}
},
"com.amazonaws.account#GetRegionOptStatusRequest": {
@@ -1555,7 +1563,9 @@
"items": "Regions"
},
"smithy.api#readonly": {},
- "smithy.api#suppress": ["HttpMethodSemantics"]
+ "smithy.api#suppress": [
+ "HttpMethodSemantics"
+ ]
}
},
"com.amazonaws.account#ListRegionsRequest": {
@@ -2016,4 +2026,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/acm-pca.json b/codegen/sdk-codegen/aws-models/acm-pca.json
index 3ef4a29dea692..3147126ababb1 100644
--- a/codegen/sdk-codegen/aws-models/acm-pca.json
+++ b/codegen/sdk-codegen/aws-models/acm-pca.json
@@ -4642,4 +4642,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/acm.json b/codegen/sdk-codegen/aws-models/acm.json
index 90bba96c0b01f..577366ff78db1 100644
--- a/codegen/sdk-codegen/aws-models/acm.json
+++ b/codegen/sdk-codegen/aws-models/acm.json
@@ -3632,4 +3632,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/alexa-for-business.json b/codegen/sdk-codegen/aws-models/alexa-for-business.json
index 1c74e86ea55c9..a88488fc8cd1b 100644
--- a/codegen/sdk-codegen/aws-models/alexa-for-business.json
+++ b/codegen/sdk-codegen/aws-models/alexa-for-business.json
@@ -10758,4 +10758,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/amp.json b/codegen/sdk-codegen/aws-models/amp.json
index 4f1683d603ea2..aa883c0db9552 100644
--- a/codegen/sdk-codegen/aws-models/amp.json
+++ b/codegen/sdk-codegen/aws-models/amp.json
@@ -3562,4 +3562,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/amplify.json b/codegen/sdk-codegen/aws-models/amplify.json
index e622772787f4f..0ab1ed84143af 100644
--- a/codegen/sdk-codegen/aws-models/amplify.json
+++ b/codegen/sdk-codegen/aws-models/amplify.json
@@ -6462,4 +6462,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/amplifybackend.json b/codegen/sdk-codegen/aws-models/amplifybackend.json
index 7f94fedac1abc..316169037764d 100644
--- a/codegen/sdk-codegen/aws-models/amplifybackend.json
+++ b/codegen/sdk-codegen/aws-models/amplifybackend.json
@@ -5968,4 +5968,4 @@
"type": "string"
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/amplifyuibuilder.json b/codegen/sdk-codegen/aws-models/amplifyuibuilder.json
index 494be37b3433f..5d41ce4e3a051 100644
--- a/codegen/sdk-codegen/aws-models/amplifyuibuilder.json
+++ b/codegen/sdk-codegen/aws-models/amplifyuibuilder.json
@@ -121,7 +121,12 @@
"externalDocumentation": "${DocHomeURL}IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-tagkeys"
}
},
- "aws.iam#supportedPrincipalTypes": ["Root", "IAMUser", "IAMRole", "FederatedUser"],
+ "aws.iam#supportedPrincipalTypes": [
+ "Root",
+ "IAMUser",
+ "IAMRole",
+ "FederatedUser"
+ ],
"aws.protocols#restJson1": {},
"smithy.api#cors": {},
"smithy.api#documentation": "resourceArn
.\n If existing tags on a resource are not specified in the request parameters, they are not\n changed. \n
\n ControlARN
for the control--that is, the\n guardrail--you are targeting,ControlARN
for your AWS Control Tower guardrail:\n ControlARN
contains the control name which is specified in each guardrail. For a list of control names for Strongly recommended and Elective guardrails, see Resource identifiers for APIs and guardrails in the Automating tasks section of the AWS Control Tower User Guide. Remember that Mandatory guardrails cannot be added or removed.arn:aws:controltower:{REGION}::control/{CONTROL_NAME}
\n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED
\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}
\n \n
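The ControlARN and target-identifier formats spelled out above are what an EnableControl call expects. A hedged sketch with @aws-sdk/client-controltower; the account and organization identifiers are illustrative:

import { ControlTowerClient, EnableControlCommand } from "@aws-sdk/client-controltower";

const controltower = new ControlTowerClient({});
await controltower.send(new EnableControlCommand({
  // Format: arn:aws:controltower:{REGION}::control/{CONTROL_NAME}
  controlIdentifier:
    "arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED",
  // Format: arn:{Partition}:organizations::{MasterAccountId}:ou/o-{OrganizationId}/ou-{OrganizationalUnitId}
  targetIdentifier: "arn:aws:organizations::111122223333:ou/o-exampleorgid/ou-examplerootid111", // illustrative
}));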
\n pending
state.5xx
) \n error responses, and ThrottlingException
and RequestThrottledException
\n client error responses. For more information see Error retries in the \n Amazon Elastic Compute Cloud User Guide.emr-serverless
. The\n emr-serverless
prefix is used in the following scenarios: \n
",
"smithy.api#title": "EMR Serverless",
@@ -3551,4 +3556,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/emr.json b/codegen/sdk-codegen/aws-models/emr.json
index dab43bdf0481a..e08217697fa10 100644
--- a/codegen/sdk-codegen/aws-models/emr.json
+++ b/codegen/sdk-codegen/aws-models/emr.json
@@ -10375,4 +10375,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/entityresolution.json b/codegen/sdk-codegen/aws-models/entityresolution.json
index 6c8bd2f49ed1b..bcf90e9768104 100644
--- a/codegen/sdk-codegen/aws-models/entityresolution.json
+++ b/codegen/sdk-codegen/aws-models/entityresolution.json
@@ -73,7 +73,11 @@
"x-amzn-platform-id",
"x-amzn-trace-id"
],
- "additionalExposedHeaders": ["x-amzn-errortype", "x-amzn-requestid", "x-amzn-trace-id"],
+ "additionalExposedHeaders": [
+ "x-amzn-errortype",
+ "x-amzn-requestid",
+ "x-amzn-trace-id"
+ ],
"maxAge": 86400
},
"smithy.api#documentation": "aws emr-serverless start-job-run
.\"Action\": [\"emr-serverless:StartJobRun\"]
. For more information,\n see Policy actions for Amazon EMR Serverless.emr-serverless.us-east-2.amazonaws.com
.TagResource
request with 0 tags.PostContent
operation supports audio input at 8kHz\n and 16kHz. You can use 8kHz audio to achieve higher speech recognition\n accuracy in telephone audio applications. \n
\n PizzaSize
): \"What size pizza would you like?\". message
, Amazon Lex\n provides additional context about the message in the response that you can\n use to enhance client behavior, such as displaying the appropriate client\n user interface. Consider the following examples: \n
\n \n
\n x-amz-lex-dialog-state
header set to\n ElicitSlot
\n x-amz-lex-intent-name
header set to the intent name\n in the current context x-amz-lex-slot-to-elicit
header set to the slot name\n for which the message
is eliciting information\n x-amz-lex-slots
header set to a map of slots\n configured for the intent with their current values x-amz-lex-dialog-state
header is set to\n Confirmation
and the\n x-amz-lex-slot-to-elicit
header is omitted. x-amz-dialog-state
header is set to\n ElicitIntent
and the x-amz-slot-to-elicit
\n header is omitted. sessionAttributes
. For more information, see Managing\n Conversation Context. \n
\n \n
\n \n
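The x-amz-lex-* response headers described above are surfaced by the JS SDK as typed members on the PostContent response, so a client can branch on dialog state without parsing headers. A hedged sketch with @aws-sdk/client-lex-runtime-service; the bot name, alias, and user ID are illustrative:

import { LexRuntimeServiceClient, PostContentCommand } from "@aws-sdk/client-lex-runtime-service";

const lex = new LexRuntimeServiceClient({});
const response = await lex.send(new PostContentCommand({
  botName: "OrderPizza",                    // illustrative
  botAlias: "prod",                         // illustrative
  userId: "user-1234",                      // illustrative
  contentType: "text/plain; charset=utf-8", // text input; use audio/l16 for 8kHz/16kHz audio
  inputStream: "I want a pizza",
}));

// x-amz-lex-dialog-state, x-amz-lex-slot-to-elicit, etc. arrive as members:
if (response.dialogState === "ElicitSlot") {
  console.log(`Eliciting slot ${response.slotToElicit}: ${response.message}`);
}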
\n READY
state.AmazonNimbleStudio-StudioUser
managed policy\n attached for the portal to function properly.AmazonNimbleStudio-StudioAdmin
managed\n policy attached for the portal to function properly.StudioEncryptionConfiguration
.CreateStudio
to encrypt this data using a key you own and\n manage.DELETED
state.DELETED
.StreamingSessionBackup
resource.CreateStreamingSessionStream
.StreamingSessionStream
changes to the READY
state,\n the url property will contain a stream to be used with the DCV streaming client.STOPPED
state into the READY
\n state. The START_IN_PROGRESS
state is the intermediate state between the\n STOPPED
and READY
states.READY
state.READY
state into the STOPPED
\n state. The STOP_IN_PROGRESS
state is the intermediate state between the\n READY
and STOPPED
states.\n
\n AWS::CloudWatch::Metric
\n AWS::Logs::LogGroup
\n AWS::XRay::Trace
\n TagResource
action with a resource that already has tags. If you specify a new tag key for the alarm,\n this tag is appended to the list of tags associated\n with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces\n the previous value for that tag.oam:ResourceTag
permission. The\n iam:ResourceTag
permission does not allow you to tag and untag links and\n sinks.oam:ResourceTag
permission. The\n iam:TagResource
permission does not allow you to tag and untag links and\n sinks.TagResource
action with a pipe that already has tags. If\n you specify a new tag key, this tag is appended to the list of tags associated with the\n pipe. If you specify a tag key that is already associated with the pipe, the new tag\n value that you specify replaces the previous value for that tag.UpdatePipe
, only the fields that are included in the request are changed, the rest are unchanged. \n The exception to this is if you modify any Amazon Web Services-service specific fields in the SourceParameters
, EnrichmentParameters
, or \n TargetParameters
objects. The fields in these objects are updated atomically as one and override existing values. This is by design and means that \n if you don't specify an optional field in one of these Parameters objects, that field will be set to its system-default value after the update.GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host\n Header Bucket Specification.InvalidObjectState
error. For information about restoring archived objects,\n see Restoring\n Archived Objects.x-amz-server-side-encryption
, should not\n be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use\n these types of keys, you’ll get an HTTP 400 Bad Request error.\n
\n x-amz-server-side-encryption-customer-algorithm
\n x-amz-server-side-encryption-customer-key
\n x-amz-server-side-encryption-customer-key-MD5
\n x-amz-tagging-count
header that provides the count of number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.\n
\n s3:ListBucket
permission.s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 (Not Found) error.s3:ListBucket
permission, Amazon S3 returns an\n HTTP status code 403 (\"access denied\") error.GET
action returns the current version of an object. To return a\n different version, use the versionId
subresource.\n
\n versionId
, you need the\n s3:GetObjectVersion
permission to access a specific version of an\n object. If you request a specific version, you do not need to have the\n s3:GetObject
permission. If you request the current version\n without a specific version ID, only s3:GetObject
permission is\n required. s3:GetObjectVersion
permission won't be required.x-amz-delete-marker: true
in the\n response.GET
\n response. For example, you might override the Content-Disposition
response\n header value in your GET
request.GET
response are Content-Type
,\n Content-Language
, Expires
, Cache-Control
,\n Content-Disposition
, and Content-Encoding
. To override these\n header values in the GET
response, you use the following request parameters.\n
\n response-content-type
\n response-content-language
\n response-expires
\n response-cache-control
\n response-content-disposition
\n response-content-encoding
\n If-Match
and If-Unmodified-Since
headers are\n present in the request as follows: If-Match
condition evaluates to\n true
, and; If-Unmodified-Since
condition evaluates to\n false
; then, S3 returns 200 OK and the data requested. If-None-Match
and If-Modified-Since
headers are\n present in the request as follows: If-None-Match
condition evaluates to\n false
, and; If-Modified-Since
condition evaluates to\n true
; then, S3 returns 304 Not Modified response code.GetObject
:\n
",
"smithy.api#http": {
@@ -26872,16 +26887,32 @@
"CORSConfiguration": {
"CORSRules": [
{
- "AllowedOrigins": ["http://www.example.com"],
- "AllowedHeaders": ["*"],
- "AllowedMethods": ["PUT", "POST", "DELETE"],
+ "AllowedOrigins": [
+ "http://www.example.com"
+ ],
+ "AllowedHeaders": [
+ "*"
+ ],
+ "AllowedMethods": [
+ "PUT",
+ "POST",
+ "DELETE"
+ ],
"MaxAgeSeconds": 3000,
- "ExposeHeaders": ["x-amz-server-side-encryption"]
+ "ExposeHeaders": [
+ "x-amz-server-side-encryption"
+ ]
},
{
- "AllowedOrigins": ["*"],
- "AllowedHeaders": ["Authorization"],
- "AllowedMethods": ["GET"],
+ "AllowedOrigins": [
+ "*"
+ ],
+ "AllowedHeaders": [
+ "Authorization"
+ ],
+ "AllowedMethods": [
+ "GET"
+ ],
"MaxAgeSeconds": 3000
}
]
@@ -27393,7 +27424,9 @@
"TopicConfigurations": [
{
"TopicArn": "arn:aws:sns:us-west-2:123456789012:s3-notification-topic",
- "Events": ["s3:ObjectCreated:*"]
+ "Events": [
+ "s3:ObjectCreated:*"
+ ]
}
]
}
@@ -31447,7 +31480,9 @@
},
"traits": {
"aws.auth#unsignedPayload": {},
- "smithy.api#auth": ["aws.auth#sigv4"],
+ "smithy.api#auth": [
+ "aws.auth#sigv4"
+ ],
"smithy.api#documentation": "GetObject
operation when using Object Lambda access points. For\n information about Object Lambda access points, see Transforming objects with\n Object Lambda access points in the Amazon S3 User Guide.RequestRoute
, RequestToken
, StatusCode
,\n ErrorCode
, and ErrorMessage
. The GetObject
\n response metadata is supported so that the WriteGetObjectResponse
caller,\n typically an Lambda function, can provide the same metadata when it internally invokes\n GetObject
. When WriteGetObjectResponse
is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject
call might differ from what Amazon S3 would normally return.x-amz-meta
. For example,\n x-amz-meta-my-custom-header: MyCustomValue
. The primary use case for this\n is to forward GetObject
metadata.TrainingJobStatus
is Failed
and,\n depending on the FailureReason
, attributes like\n TrainingStartTime
, TrainingTimeInSeconds
,\n TrainingEndTime
, and BillableTimeInSeconds
may not be\n present in the response.UpdateSchedule
, EventBridge Scheduler uses all values, including empty values, specified in the request and\n overrides the existing schedule. This is by design. This means that if you do not set an optional field in your request, that field will be set to\n its system-default value after the update.\n GetSchedule
API operation and make a note of all optional parameters\n for your UpdateSchedule
call.\n Custom\n Event
.ListTimelineEvents
to find an\n event's ID.TimelineEvent
.RelatedItem
referenced in a TimelineEvent
.ListTimelineEvents
.createdBy
. If no\n service principal assumed the role this will be left blank.sortBy
\n field.aws.
\" Customer-generated events can have any value here,\n as long as it doesn't begin with \"aws.
\" We recommend the use of Java package-name\n style reverse domain-name strings. Open
or\n Resolved
.ListTimelineEvents
to find an\n event's ID.Custom Event
.\n
",
"smithy.api#title": "Amazon Transcribe Streaming Service",
@@ -3315,4 +3320,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/transcribe.json b/codegen/sdk-codegen/aws-models/transcribe.json
index dd658438cea4d..193e38f15b152 100644
--- a/codegen/sdk-codegen/aws-models/transcribe.json
+++ b/codegen/sdk-codegen/aws-models/transcribe.json
@@ -7092,4 +7092,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/transfer.json b/codegen/sdk-codegen/aws-models/transfer.json
index f7329826b3fdc..087b7036811d7 100644
--- a/codegen/sdk-codegen/aws-models/transfer.json
+++ b/codegen/sdk-codegen/aws-models/transfer.json
@@ -9387,4 +9387,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/translate.json b/codegen/sdk-codegen/aws-models/translate.json
index f0f6cc2cc32e1..9ead21ef2658d 100644
--- a/codegen/sdk-codegen/aws-models/translate.json
+++ b/codegen/sdk-codegen/aws-models/translate.json
@@ -3925,4 +3925,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/verifiedpermissions.json b/codegen/sdk-codegen/aws-models/verifiedpermissions.json
index 6f184f6b7752a..4ab01f58bbf1d 100644
--- a/codegen/sdk-codegen/aws-models/verifiedpermissions.json
+++ b/codegen/sdk-codegen/aws-models/verifiedpermissions.json
@@ -3660,7 +3660,12 @@
"aws.auth#sigv4": {
"name": "verifiedpermissions"
},
- "aws.iam#supportedPrincipalTypes": ["Root", "IAMUser", "IAMRole", "FederatedUser"],
+ "aws.iam#supportedPrincipalTypes": [
+ "Root",
+ "IAMUser",
+ "IAMRole",
+ "FederatedUser"
+ ],
"aws.protocols#awsJson1_0": {},
"smithy.api#documentation": "\n
\n jane
leaves the company, and you later\n let someone else use the name jane
, then that new user\n automatically gets access to everything granted by policies that still\n reference User::\"jane\"
. Cedar can’t distinguish between the\n new user and the old. This applies to both principal and resource\n identifiers. Always use identifiers that are guaranteed unique and never\n reused to ensure that you don’t unintentionally grant access because of the\n presence of an old identifier in a policy.\n
",
"smithy.api#title": "Amazon Verified Permissions",
@@ -4331,4 +4336,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/codegen/sdk-codegen/aws-models/voice-id.json b/codegen/sdk-codegen/aws-models/voice-id.json
index bd749749bd3c2..7c0789b936d6f 100644
--- a/codegen/sdk-codegen/aws-models/voice-id.json
+++ b/codegen/sdk-codegen/aws-models/voice-id.json
@@ -352,7 +352,9 @@
"aws.cloudformation#cfnExcludeProperty": {},
"smithy.api#documentation": "Detail
are used in\n Get
operations.Item
are used in\n List
operations.(
+ request: __HttpRequest,
+ context: Context,
+ operationName: O,
+ serializer: __OperationSerializer,
+ operation: __Operation<__OperationInput, __OperationOutput, Context>,
+ serializeFrameworkException: (e: __SmithyFrameworkException, ctx: __ServerSerdeContext) => Promise<__HttpResponse>,
+ validationFn: (input: __OperationInput) => __ValidationFailure[],
+ validationCustomizer: __ValidationCustomizer