diff --git a/.changes/1.35.55.json b/.changes/1.35.55.json new file mode 100644 index 0000000000..55be857ebf --- /dev/null +++ b/.changes/1.35.55.json @@ -0,0 +1,37 @@ +[ + { + "category": "``codebuild``", + "description": "AWS CodeBuild now adds additional compute types for reserved capacity fleet.", + "type": "api-change" + }, + { + "category": "``guardduty``", + "description": "GuardDuty RDS Protection expands support for Amazon Aurora PostgreSQL Limitless Databases.", + "type": "api-change" + }, + { + "category": "``lakeformation``", + "description": "API changes for new named tag expressions feature.", + "type": "api-change" + }, + { + "category": "``qapps``", + "description": "Introduces category apis in AmazonQApps. Web experience users use Categories to tag and filter library items.", + "type": "api-change" + }, + { + "category": "``s3control``", + "description": "Fix ListStorageLensConfigurations and ListStorageLensGroups deserialization for Smithy SDKs.", + "type": "api-change" + }, + { + "category": "``verifiedpermissions``", + "description": "Adding BatchGetPolicy API which supports the retrieval of multiple policies across multiple policy stores within a single request.", + "type": "api-change" + }, + { + "category": "protocol", + "description": "Added support for header enabling service migration off the AWS Query protocol.", + "type": "enhancement" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6c580cc658..5180642646 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,18 @@ CHANGELOG ========= +1.35.55 +======= + +* api-change:``codebuild``: AWS CodeBuild now adds additional compute types for reserved capacity fleet. +* api-change:``guardduty``: GuardDuty RDS Protection expands support for Amazon Aurora PostgreSQL Limitless Databases. +* api-change:``lakeformation``: API changes for new named tag expressions feature. +* api-change:``qapps``: Introduces category apis in AmazonQApps. Web experience users use Categories to tag and filter library items. +* api-change:``s3control``: Fix ListStorageLensConfigurations and ListStorageLensGroups deserialization for Smithy SDKs. +* api-change:``verifiedpermissions``: Adding BatchGetPolicy API which supports the retrieval of multiple policies across multiple policy stores within a single request. +* enhancement:protocol: Added support for header enabling service migration off the AWS Query protocol. + + 1.35.54 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 7d30fabaf8..4585bfecbb 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.54' +__version__ = '1.35.55' class NullHandler(logging.Handler): diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index c4090a825c..031467a8d8 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -1580,6 +1580,28 @@ "type":"list", "member":{"shape":"CodeCoverage"} }, + "ComputeConfiguration":{ + "type":"structure", + "members":{ + "vCpu":{ + "shape":"WrapperLong", + "documentation":"
The number of vCPUs of the instance type included in your fleet.
" + }, + "memory":{ + "shape":"WrapperLong", + "documentation":"The amount of memory of the instance type included in your fleet.
" + }, + "disk":{ + "shape":"WrapperLong", + "documentation":"The amount of disk space of the instance type included in your fleet.
" + }, + "machineType":{ + "shape":"MachineType", + "documentation":"The machine type of the instance type included in your fleet.
" + } + }, + "documentation":"Contains compute attributes. These attributes only need be specified when your project's or fleet's computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
Information about the compute resources the compute fleet uses. Available values include:
BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build environment compute types in the CodeBuild User Guide.
" + "documentation":"Information about the compute resources the compute fleet uses. Available values include:
ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.
BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 4 GiB memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 16 GiB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see On-demand environment types in the CodeBuild User Guide.
" + }, + "computeConfiguration":{ + "shape":"ComputeConfiguration", + "documentation":"The compute configuration of the compute fleet. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
Information about the compute resources the compute fleet uses. Available values include:
BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build environment compute types in the CodeBuild User Guide.
" + "documentation":"Information about the compute resources the compute fleet uses. Available values include:
ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.
BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 4 GiB memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 16 GiB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see On-demand environment types in the CodeBuild User Guide.
" + }, + "computeConfiguration":{ + "shape":"ComputeConfiguration", + "documentation":"The compute configuration of the compute fleet. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
Information about build logs in CloudWatch Logs.
" }, + "MachineType":{ + "type":"string", + "enum":[ + "GENERAL", + "NVME" + ] + }, "NetworkInterface":{ "type":"structure", "members":{ @@ -3379,7 +3417,11 @@ }, "computeType":{ "shape":"ComputeType", - "documentation":"Information about the compute resources the build project uses. Available values include:
BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
BUILD_LAMBDA_1GB
: Use up to 1 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_2GB
: Use up to 2 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_4GB
: Use up to 4 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_8GB
: Use up to 8 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_10GB
: Use up to 10 GB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.
If you're using compute fleets during project creation, computeType
will be ignored.
For more information, see Build Environment Compute Types in the CodeBuild User Guide.
" + "documentation":"Information about the compute resources the build project uses. Available values include:
ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.
BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 4 GiB memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 16 GiB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see On-demand environment types in the CodeBuild User Guide.
" + }, + "computeConfiguration":{ + "shape":"ComputeConfiguration", + "documentation":"The compute configuration of the build project. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
Information about the compute resources the compute fleet uses. Available values include:
BUILD_GENERAL1_SMALL
: Use up to 3 GB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 7 GB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_XLARGE
: Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 3 GB memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 15 GB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see Build environment compute types in the CodeBuild User Guide.
" + "documentation":"Information about the compute resources the compute fleet uses. Available values include:
ATTRIBUTE_BASED_COMPUTE
: Specify the amount of vCPUs, memory, disk space, and the type of machine.
If you use ATTRIBUTE_BASED_COMPUTE
, you must define your attributes by using computeConfiguration
. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.
BUILD_GENERAL1_SMALL
: Use up to 4 GiB memory and 2 vCPUs for builds.
BUILD_GENERAL1_MEDIUM
: Use up to 8 GiB memory and 4 vCPUs for builds.
BUILD_GENERAL1_LARGE
: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_XLARGE
: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.
BUILD_GENERAL1_2XLARGE
: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.
BUILD_LAMBDA_1GB
: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_2GB
: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_4GB
: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_8GB
: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
BUILD_LAMBDA_10GB
: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER
and ARM_LAMBDA_CONTAINER
.
If you use BUILD_GENERAL1_SMALL
:
For environment type LINUX_CONTAINER
, you can use up to 4 GiB memory and 2 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.
For environment type ARM_CONTAINER
, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.
If you use BUILD_GENERAL1_LARGE
:
For environment type LINUX_CONTAINER
, you can use up to 16 GiB memory and 8 vCPUs for builds.
For environment type LINUX_GPU_CONTAINER
, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.
For environment type ARM_CONTAINER
, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.
For more information, see On-demand environment types in the CodeBuild User Guide.
" + }, + "computeConfiguration":{ + "shape":"ComputeConfiguration", + "documentation":"The compute configuration of the compute fleet. This is only required if computeType
is set to ATTRIBUTE_BASED_COMPUTE
.
The unique ID of the detector of the GuardDuty member account.
", + "documentation":"The unique ID of the detector of the GuardDuty member account.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector that specifies the GuardDuty service whose findings you want to archive.
", + "documentation":"The ID of the detector that specifies the GuardDuty service whose findings you want to archive.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The detector ID associated with the GuardDuty account for which you want to create a filter.
", + "documentation":"The detector ID associated with the GuardDuty account for which you want to create a filter.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.
", + "documentation":"The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty account for which you want to associate member accounts.
", + "documentation":"The unique ID of the detector of the GuardDuty account for which you want to associate member accounts.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the GuardDuty detector associated with the publishing destination.
", + "documentation":"The ID of the GuardDuty detector associated with the publishing destination.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector for which you need to create sample findings.
", + "documentation":"The ID of the detector for which you need to create sample findings.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty account for which you want to create a ThreatIntelSet
.
The unique ID of the detector of the GuardDuty account for which you want to create a ThreatIntelSet
.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that you want to delete.
", + "documentation":"The unique ID of the detector that you want to delete.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with the filter.
", + "documentation":"The unique ID of the detector that is associated with the filter.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector associated with the IPSet.
", + "documentation":"The unique ID of the detector associated with the IPSet.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty account whose members you want to delete.
", + "documentation":"The unique ID of the detector of the GuardDuty account whose members you want to delete.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector associated with the publishing destination to delete.
", + "documentation":"The unique ID of the detector associated with the publishing destination to delete.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with the threatIntelSet.
", + "documentation":"The unique ID of the detector that is associated with the threatIntelSet.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that the request is associated with.
", + "documentation":"The unique ID of the detector that the request is associated with.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The detector ID of the delegated administrator for which you need to retrieve the information.
", + "documentation":"The detector ID of the delegated administrator for which you need to retrieve the information.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector associated with the publishing destination to retrieve.
", + "documentation":"The unique ID of the detector associated with the publishing destination to retrieve.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the GuardDuty detector.
", + "documentation":"The unique ID of the GuardDuty detector.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that you want to get.
", + "documentation":"The unique ID of the detector that you want to get.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with this filter.
", + "documentation":"The unique ID of the detector that is associated with this filter.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector that specifies the GuardDuty service whose findings you want to retrieve.
", + "documentation":"The ID of the detector that specifies the GuardDuty service whose findings you want to retrieve.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector whose findings statistics you want to retrieve.
", + "documentation":"The ID of the detector whose findings statistics you want to retrieve.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with the IPSet.
", + "documentation":"The unique ID of the detector that is associated with the IPSet.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with this scan.
", + "documentation":"The unique ID of the detector that is associated with this scan.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty member account.
", + "documentation":"The unique ID of the detector of the GuardDuty member account.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The detector ID for the administrator account.
", + "documentation":"The detector ID for the administrator account.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty account whose members you want to retrieve.
", + "documentation":"The unique ID of the detector of the GuardDuty account whose members you want to retrieve.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty member account.
", + "documentation":"The unique ID of the detector of the GuardDuty member account.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with the threatIntelSet.
", + "documentation":"The unique ID of the detector that is associated with the threatIntelSet.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector that specifies the GuardDuty service whose usage statistics you want to retrieve.
", + "documentation":"The ID of the detector that specifies the GuardDuty service whose usage statistics you want to retrieve.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty account with which you want to invite members.
", + "documentation":"The unique ID of the detector of the GuardDuty account with which you want to invite members.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector whose coverage details you want to retrieve.
", + "documentation":"The unique ID of the detector whose coverage details you want to retrieve.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with the filter.
", + "documentation":"The unique ID of the detector that is associated with the filter.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector that specifies the GuardDuty service whose findings you want to list.
", + "documentation":"The ID of the detector that specifies the GuardDuty service whose findings you want to list.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with IPSet.
", + "documentation":"The unique ID of the detector that is associated with IPSet.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with the member.
", + "documentation":"The unique ID of the detector that is associated with the member.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The detector ID for which you want to retrieve the publishing destination.
", + "documentation":"The detector ID for which you want to retrieve the publishing destination.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that is associated with the threatIntelSet.
", + "documentation":"The unique ID of the detector that is associated with the threatIntelSet.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
Instance tag key-value pairs associated with the database instance ID.
", + "documentation":"Information about the tag key-value pairs.
", "locationName":"tags" } }, @@ -7396,6 +7396,47 @@ }, "documentation":"Contains information about the user and authentication details for a database instance involved in the finding.
" }, + "RdsLimitlessDbDetails":{ + "type":"structure", + "members":{ + "DbShardGroupIdentifier":{ + "shape":"String", + "documentation":"The name associated with the Limitless DB shard group.
", + "locationName":"dbShardGroupIdentifier" + }, + "DbShardGroupResourceId":{ + "shape":"String", + "documentation":"The resource identifier of the DB shard group within the Limitless Database.
", + "locationName":"dbShardGroupResourceId" + }, + "DbShardGroupArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) that identifies the DB shard group.
", + "locationName":"dbShardGroupArn" + }, + "Engine":{ + "shape":"String", + "documentation":"The database engine of the database instance involved in the finding.
", + "locationName":"engine" + }, + "EngineVersion":{ + "shape":"String", + "documentation":"The version of the database engine.
", + "locationName":"engineVersion" + }, + "DbClusterIdentifier":{ + "shape":"String", + "documentation":"The name of the database cluster that is a part of the Limitless Database.
", + "locationName":"dbClusterIdentifier" + }, + "Tags":{ + "shape":"Tags", + "documentation":"Information about the tag-key value pair.
", + "locationName":"tags" + } + }, + "documentation":"Contains information about the resource type RDSLimitlessDB
that is involved in a GuardDuty finding.
Contains information about the database instance to which an anomalous login attempt was made.
", "locationName":"rdsDbInstanceDetails" }, + "RdsLimitlessDbDetails":{ + "shape":"RdsLimitlessDbDetails", + "documentation":"Contains information about the RDS Limitless database that was involved in a GuardDuty finding.
", + "locationName":"rdsLimitlessDbDetails" + }, "RdsDbUserDetails":{ "shape":"RdsDbUserDetails", "documentation":"Contains information about the user details through which anomalous login attempt was made.
", @@ -7885,12 +7931,12 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"The unique ID of the detector that the request is associated with.
", + "documentation":"The unique ID of the detector that the request is associated with.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique detector ID of the administrator account that the request is associated with. If the account is an administrator, the AdminDetectorId
will be the same as the one used for DetectorId
.
The unique detector ID of the administrator account that the request is associated with. If the account is an administrator, the AdminDetectorId
will be the same as the one used for DetectorId
.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector of the GuardDuty administrator account associated with the member accounts to monitor.
", + "documentation":"The unique ID of the detector of the GuardDuty administrator account associated with the member accounts to monitor.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector associated with the GuardDuty administrator account that is monitoring member accounts.
", + "documentation":"The unique ID of the detector associated with the GuardDuty administrator account that is monitoring member accounts.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The EC2 instance tag key.
", + "documentation":"Describes the key associated with the tag.
", "locationName":"key" }, "Value":{ "shape":"String", - "documentation":"The EC2 instance tag value.
", + "documentation":"Describes the value associated with the tag key.
", "locationName":"value" } }, - "documentation":"Contains information about a tag associated with the EC2 instance.
" + "documentation":"Contains information about a tag key-value pair.
" }, "TagKey":{ "type":"string", @@ -8693,7 +8739,7 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"The ID of the detector associated with the findings to unarchive.
", + "documentation":"The ID of the detector associated with the findings to unarchive.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector to update.
", + "documentation":"The unique ID of the detector to update.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that specifies the GuardDuty service where you want to update a filter.
", + "documentation":"The unique ID of the detector that specifies the GuardDuty service where you want to update a filter.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector that is associated with the findings for which you want to update the feedback.
", + "documentation":"The ID of the detector that is associated with the findings for which you want to update the feedback.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The detectorID that specifies the GuardDuty service whose IPSet you want to update.
", + "documentation":"The detectorID that specifies the GuardDuty service whose IPSet you want to update.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The unique ID of the detector that specifies the GuardDuty service where you want to update scan settings.
", + "documentation":"The unique ID of the detector that specifies the GuardDuty service where you want to update scan settings.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The detector ID of the administrator account.
", + "documentation":"The detector ID of the administrator account.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector that configures the delegated administrator.
", + "documentation":"The ID of the detector that configures the delegated administrator.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The ID of the detector associated with the publishing destinations to update.
", + "documentation":"The ID of the detector associated with the publishing destinations to update.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to update.
", + "documentation":"The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to update.
To find the detectorId
in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.
Creates an LF-tag with the specified name and values.
" }, + "CreateLFTagExpression":{ + "name":"CreateLFTagExpression", + "http":{ + "method":"POST", + "requestUri":"/CreateLFTagExpression" + }, + "input":{"shape":"CreateLFTagExpressionRequest"}, + "output":{"shape":"CreateLFTagExpressionResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"Creates a new LF-Tag expression with the provided name, description, catalog ID, and expression body. This call fails if a LF-Tag expression with the same name already exists in the caller’s account or if the underlying LF-Tags don't exist. To call this API operation, caller needs the following Lake Formation permissions:
CREATE_LF_TAG_EXPRESSION
on the root catalog resource.
GRANT_WITH_LF_TAG_EXPRESSION
on all underlying LF-Tag key:value pairs included in the expression.
Deletes the specified LF-tag given a key name. If the input parameter tag key was not found, then the operation will throw an exception. When you delete an LF-tag, the LFTagPolicy
 attached to the LF-tag becomes invalid. If the deleted LF-tag was still assigned to any resource, the tag policy attached to the deleted LF-tag will no longer be applied to the resource.
Deletes the LF-Tag expression. The caller must be a data lake admin or have DROP
 permissions on the LF-Tag expression. Deleting an LF-Tag expression will also delete all LFTagPolicy
permissions referencing the LF-Tag expression.
Returns an LF-tag definition.
" }, + "GetLFTagExpression":{ + "name":"GetLFTagExpression", + "http":{ + "method":"POST", + "requestUri":"/GetLFTagExpression" + }, + "input":{"shape":"GetLFTagExpressionRequest"}, + "output":{"shape":"GetLFTagExpressionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Returns the details about the LF-Tag expression. The caller must be a data lake admin or must have DESCRIBE
permission on the LF-Tag expression resource.
Allows a caller in a secure environment to assume a role with permission to access Amazon S3. In order to vend such credentials, Lake Formation assumes the role associated with a registered location, for example an Amazon S3 bucket, with a scope down policy which restricts the access to a single prefix.
" + "documentation":"Allows a caller in a secure environment to assume a role with permission to access Amazon S3. In order to vend such credentials, Lake Formation assumes the role associated with a registered location, for example an Amazon S3 bucket, with a scope down policy which restricts the access to a single prefix.
To call this API, the role that the service assumes must have lakeformation:GetDataAccess
permission on the resource.
Lists all the data cell filters on a table.
" }, + "ListLFTagExpressions":{ + "name":"ListLFTagExpressions", + "http":{ + "method":"POST", + "requestUri":"/ListLFTagExpressions" + }, + "input":{"shape":"ListLFTagExpressionsRequest"}, + "output":{"shape":"ListLFTagExpressionsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Returns the LF-Tag expressions in caller’s account filtered based on caller's permissions. Data Lake and read only admins implicitly can see all tag expressions in their account, else caller needs DESCRIBE permissions on tag expression.
" + }, "ListLFTags":{ "name":"ListLFTags", "http":{ @@ -744,7 +814,7 @@ {"shape":"ResourceNumberLimitExceededException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Registers the resource as managed by the Data Catalog.
To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.
The following request registers a new location and gives Lake Formation permission to use the service-linked role to access that location.
ResourceArn = arn:aws:s3:::my-bucket UseServiceLinkedRole = true
If UseServiceLinkedRole
is not set to true, you must provide or set the RoleArn
:
arn:aws:iam::12345:role/my-data-access-role
Registers the resource as managed by the Data Catalog.
To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.
The following request registers a new location and gives Lake Formation permission to use the service-linked role to access that location.
ResourceArn = arn:aws:s3:::my-bucket/ UseServiceLinkedRole = true
If UseServiceLinkedRole
is not set to true, you must provide or set the RoleArn
:
arn:aws:iam::12345:role/my-data-access-role
Updates the list of possible values for the specified LF-tag key. If the LF-tag does not exist, the operation throws an EntityNotFoundException. The values in the delete key values will be deleted from list of possible values. If any value in the delete key values is attached to a resource, then API errors out with a 400 Exception - \"Update not allowed\". Untag the attribute before deleting the LF-tag key's value.
" }, + "UpdateLFTagExpression":{ + "name":"UpdateLFTagExpression", + "http":{ + "method":"POST", + "requestUri":"/UpdateLFTagExpression" + }, + "input":{"shape":"UpdateLFTagExpressionRequest"}, + "output":{"shape":"UpdateLFTagExpressionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Updates the name of the LF-Tag expression to the new description and expression body provided. Updating a LF-Tag expression immediately changes the permission boundaries of all existing LFTagPolicy
permission grants that reference the given LF-Tag expression.
A name for the expression.
" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"A description with information about the LF-Tag expression.
" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.
" + }, + "Expression":{ + "shape":"Expression", + "documentation":"A list of LF-Tag conditions (key-value pairs).
" + } + } + }, + "CreateLFTagExpressionResponse":{ + "type":"structure", + "members":{ + } + }, "CreateLFTagRequest":{ "type":"structure", "required":[ @@ -1532,7 +1650,8 @@ "LF_TAG", "LF_TAG_POLICY", "LF_TAG_POLICY_DATABASE", - "LF_TAG_POLICY_TABLE" + "LF_TAG_POLICY_TABLE", + "LF_NAMED_TAG_EXPRESSION" ] }, "DataLakeSettings":{ @@ -1645,6 +1764,25 @@ "members":{ } }, + "DeleteLFTagExpressionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"The name for the LF-Tag expression.
" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"The identifier for the Data Catalog. By default, the account ID in which the LF-Tag expression is saved.
" + } + } + }, + "DeleteLFTagExpressionResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteLFTagRequest":{ "type":"structure", "required":["TagKey"], @@ -2100,6 +2238,41 @@ } } }, + "GetLFTagExpressionRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"The name for the LF-Tag expression
" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"The identifier for the Data Catalog. By default, the account ID.
" + } + } + }, + "GetLFTagExpressionResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"The name for the LF-Tag expression.
" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"The description with information about the LF-Tag expression.
" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"The identifier for the Data Catalog. By default, the account ID in which the LF-Tag expression is saved.
" + }, + "Expression":{ + "shape":"Expression", + "documentation":"The body of the LF-Tag expression. It is composed of one or more LF-Tag key-value pairs.
" + } + } + }, "GetLFTagRequest":{ "type":"structure", "required":["TagKey"], @@ -2623,6 +2796,47 @@ "type":"list", "member":{"shape":"LFTagError"} }, + "LFTagExpression":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"The name for saved the LF-Tag expression.
" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"A structure that contains information about the LF-Tag expression.
" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"The identifier for the Data Catalog. By default, the account ID.
" + }, + "Expression":{ + "shape":"Expression", + "documentation":"A logical expression composed of one or more LF-Tags.
" + } + }, + "documentation":"A structure consists LF-Tag expression name and catalog ID.
" + }, + "LFTagExpressionResource":{ + "type":"structure", + "required":["Name"], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"The identifier for the Data Catalog. By default, the account ID.
" + }, + "Name":{ + "shape":"NameString", + "documentation":"The name of the LF-Tag expression to grant permissions on.
" + } + }, + "documentation":"A structure containing a LF-Tag expression (keys and values).
" + }, + "LFTagExpressionsList":{ + "type":"list", + "member":{"shape":"LFTagExpression"} + }, "LFTagKey":{ "type":"string", "max":128, @@ -2675,10 +2889,7 @@ }, "LFTagPolicyResource":{ "type":"structure", - "required":[ - "ResourceType", - "Expression" - ], + "required":["ResourceType"], "members":{ "CatalogId":{ "shape":"CatalogIdString", @@ -2690,10 +2901,14 @@ }, "Expression":{ "shape":"Expression", - "documentation":"A list of LF-tag conditions that apply to the resource's LF-tag policy.
" + "documentation":"A list of LF-tag conditions or a saved expression that apply to the resource's LF-tag policy.
" + }, + "ExpressionName":{ + "shape":"NameString", + "documentation":"If provided, permissions are granted to the Data Catalog resources whose assigned LF-Tags match the expression body of the saved expression under the provided ExpressionName
.
A structure containing a list of LF-tag conditions that apply to a resource's LF-tag policy.
" + "documentation":"A structure containing a list of LF-tag conditions or saved LF-Tag expressions that apply to a resource's LF-tag policy.
" }, "LFTagValue":{ "type":"string", @@ -2758,6 +2973,36 @@ } } }, + "ListLFTagExpressionsRequest":{ + "type":"structure", + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"The identifier for the Data Catalog. By default, the account ID.
" + }, + "MaxResults":{ + "shape":"PageSize", + "documentation":"The maximum number of results to return.
" + }, + "NextToken":{ + "shape":"Token", + "documentation":"A continuation token, if this is not the first call to retrieve this list.
" + } + } + }, + "ListLFTagExpressionsResponse":{ + "type":"structure", + "members":{ + "LFTagExpressions":{ + "shape":"LFTagExpressionsList", + "documentation":"Logical expressions composed of one more LF-Tag key-value pairs.
" + }, + "NextToken":{ + "shape":"Token", + "documentation":"A continuation token, if this is not the first call to retrieve this list.
" + } + } + }, "ListLFTagsRequest":{ "type":"structure", "members":{ @@ -3091,7 +3336,8 @@ "DATA_LOCATION_ACCESS", "CREATE_LF_TAG", "ASSOCIATE", - "GRANT_WITH_LF_TAG_EXPRESSION" + "GRANT_WITH_LF_TAG_EXPRESSION", + "CREATE_LF_TAG_EXPRESSION" ] }, "PermissionList":{ @@ -3396,7 +3642,11 @@ }, "LFTagPolicy":{ "shape":"LFTagPolicyResource", - "documentation":"A list of LF-tag conditions that define a resource's LF-tag policy.
" + "documentation":"A list of LF-tag conditions or saved LF-Tag expressions that define a resource's LF-tag policy.
" + }, + "LFTagExpression":{ + "shape":"LFTagExpressionResource", + "documentation":"LF-Tag expression resource. A logical expression composed of one or more LF-Tag key:value pairs.
" } }, "documentation":"A structure for the resource.
" @@ -4008,6 +4258,36 @@ "members":{ } }, + "UpdateLFTagExpressionRequest":{ + "type":"structure", + "required":[ + "Name", + "Expression" + ], + "members":{ + "Name":{ + "shape":"NameString", + "documentation":"The name for the LF-Tag expression.
" + }, + "Description":{ + "shape":"DescriptionString", + "documentation":"The description with information about the saved LF-Tag expression.
" + }, + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"The identifier for the Data Catalog. By default, the account ID.
" + }, + "Expression":{ + "shape":"Expression", + "documentation":"The LF-Tag expression body composed of one more LF-Tag key-value pairs.
" + } + } + }, + "UpdateLFTagExpressionResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateLFTagRequest":{ "type":"structure", "required":["TagKey"], @@ -4148,7 +4428,7 @@ }, "StorageOptimizerConfig":{ "shape":"StorageOptimizerConfigMap", - "documentation":"Name of the table for which to enable the storage optimizer.
" + "documentation":"Name of the configuration for the storage optimizer.
" } } }, diff --git a/botocore/data/qapps/2023-11-27/service-2.json b/botocore/data/qapps/2023-11-27/service-2.json index cce8be26c8..a2bf087de1 100644 --- a/botocore/data/qapps/2023-11-27/service-2.json +++ b/botocore/data/qapps/2023-11-27/service-2.json @@ -52,6 +52,63 @@ ], "documentation":"This operation creates a link between the user's identity calling the operation and a specific Q App. This is useful to mark the Q App as a favorite for the user if the user doesn't own the Amazon Q App so they can still run it and see it in their inventory of Q Apps.
" }, + "BatchCreateCategory":{ + "name":"BatchCreateCategory", + "http":{ + "method":"POST", + "requestUri":"/catalog.createCategories", + "responseCode":200 + }, + "input":{"shape":"BatchCreateCategoryInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Creates Categories for the Amazon Q Business application environment instance. Web experience users use Categories to tag and filter library items. For more information, see Custom labels for Amazon Q Apps.
" + }, + "BatchDeleteCategory":{ + "name":"BatchDeleteCategory", + "http":{ + "method":"POST", + "requestUri":"/catalog.deleteCategories", + "responseCode":200 + }, + "input":{"shape":"BatchDeleteCategoryInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Deletes Categories for the Amazon Q Business application environment instance. Web experience users use Categories to tag and filter library items. For more information, see Custom labels for Amazon Q Apps.
" + }, + "BatchUpdateCategory":{ + "name":"BatchUpdateCategory", + "http":{ + "method":"POST", + "requestUri":"/catalog.updateCategories", + "responseCode":200 + }, + "input":{"shape":"BatchUpdateCategoryInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"Updates Categories for the Amazon Q Business application environment instance. Web experience users use Categories to tag and filter library items. For more information, see Custom labels for Amazon Q Apps.
" + }, "CreateLibraryItem":{ "name":"CreateLibraryItem", "http":{ @@ -249,6 +306,25 @@ ], "documentation":"Uploads a file that can then be used either as a default in a FileUploadCard
from Q App definition or as a file that is used inside a single Q App run. The purpose of the document is determined by a scope parameter that indicates whether it is at the app definition level or at the app session level.
Lists the categories of a Amazon Q Business application environment instance. For more information, see Custom labels for Amazon Q Apps.
" + }, "ListLibraryItems":{ "name":"ListLibraryItems", "http":{ @@ -653,6 +729,100 @@ "type":"list", "member":{"shape":"AttributeFilter"} }, + "BatchCreateCategoryInput":{ + "type":"structure", + "required":[ + "instanceId", + "categories" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"The unique identifier of the Amazon Q Business application environment instance.
", + "location":"header", + "locationName":"instance-id" + }, + "categories":{ + "shape":"BatchCreateCategoryInputCategoryList", + "documentation":"The list of category objects to be created
" + } + } + }, + "BatchCreateCategoryInputCategory":{ + "type":"structure", + "required":["title"], + "members":{ + "id":{ + "shape":"UUID", + "documentation":"The unique identifier to be associated with a category. If you don't include a value, the category is automatically assigned a unique identifier.
" + }, + "title":{ + "shape":"BatchCreateCategoryInputCategoryTitleString", + "documentation":"The name of the category.
" + }, + "color":{ + "shape":"BatchCreateCategoryInputCategoryColorString", + "documentation":"The color to be associated with a category. The color must be a hexadecimal value of either 3 or 6 digits.
" + } + }, + "documentation":"The category object to be created.
" + }, + "BatchCreateCategoryInputCategoryColorString":{ + "type":"string", + "max":7, + "min":4, + "pattern":"#([A-Fa-f0-9]{3}|[A-Fa-f0-9]{6})" + }, + "BatchCreateCategoryInputCategoryList":{ + "type":"list", + "member":{"shape":"BatchCreateCategoryInputCategory"}, + "max":10, + "min":0 + }, + "BatchCreateCategoryInputCategoryTitleString":{ + "type":"string", + "max":30, + "min":1, + "pattern":"[a-zA-Z0-9_]+( [a-zA-Z0-9_]+)*" + }, + "BatchDeleteCategoryInput":{ + "type":"structure", + "required":[ + "instanceId", + "categories" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"The unique identifier of the Amazon Q Business application environment instance.
", + "location":"header", + "locationName":"instance-id" + }, + "categories":{ + "shape":"DeleteCategoryInputList", + "documentation":"The list of IDs of the categories to be deleted.
" + } + } + }, + "BatchUpdateCategoryInput":{ + "type":"structure", + "required":[ + "instanceId", + "categories" + ], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"The unique identifier of the Amazon Q Business application environment instance.
", + "location":"header", + "locationName":"instance-id" + }, + "categories":{ + "shape":"CategoryListInput", + "documentation":"The list of categories to be updated with their new values.
" + } + } + }, "Boolean":{ "type":"boolean", "box":true @@ -783,6 +953,12 @@ "max":5000, "min":0 }, + "CategoriesList":{ + "type":"list", + "member":{"shape":"Category"}, + "max":10, + "min":0 + }, "Category":{ "type":"structure", "required":[ @@ -797,6 +973,14 @@ "title":{ "shape":"String", "documentation":"The title or name of the category.
" + }, + "color":{ + "shape":"String", + "documentation":"The color of the category
" + }, + "appCount":{ + "shape":"Integer", + "documentation":"The number of published Amazon Q Apps associated with a category
" } }, "documentation":"A category used to classify and filter library items for Amazon Q Apps.
" @@ -807,12 +991,52 @@ "max":3, "min":0 }, + "CategoryInput":{ + "type":"structure", + "required":[ + "id", + "title" + ], + "members":{ + "id":{ + "shape":"UUID", + "documentation":"The unique identifier of the category.
" + }, + "title":{ + "shape":"CategoryInputTitleString", + "documentation":"The name of the category.
" + }, + "color":{ + "shape":"CategoryInputColorString", + "documentation":"The color of the category, represented as a hexadecimal value of either 3 or 6 digits.
" + } + }, + "documentation":"A label that web experience users associate with a library item. Web experience users use Categories to tag and filter library items.
" + }, + "CategoryInputColorString":{ + "type":"string", + "max":7, + "min":4, + "pattern":"#([A-Fa-f0-9]{3}|[A-Fa-f0-9]{6})" + }, + "CategoryInputTitleString":{ + "type":"string", + "max":30, + "min":1, + "pattern":"[a-zA-Z0-9_]+( [a-zA-Z0-9_]+)*" + }, "CategoryList":{ "type":"list", "member":{"shape":"Category"}, "max":3, "min":0 }, + "CategoryListInput":{ + "type":"list", + "member":{"shape":"CategoryInput"}, + "max":10, + "min":0 + }, "ConflictException":{ "type":"structure", "required":[ @@ -1060,6 +1284,12 @@ "max":500, "min":0 }, + "DeleteCategoryInputList":{ + "type":"list", + "member":{"shape":"UUID"}, + "max":10, + "min":0 + }, "DeleteLibraryItemInput":{ "type":"structure", "required":[ @@ -1687,6 +1917,27 @@ "DISABLED" ] }, + "ListCategoriesInput":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"InstanceId", + "documentation":"The unique identifier of the Amazon Q Business application environment instance.
", + "location":"header", + "locationName":"instance-id" + } + } + }, + "ListCategoriesOutput":{ + "type":"structure", + "members":{ + "categories":{ + "shape":"CategoriesList", + "documentation":"The categories of a Amazon Q Business application environment instance.
" + } + } + }, "ListLibraryItemsInput":{ "type":"structure", "required":["instanceId"], @@ -2367,7 +2618,7 @@ }, "UUID":{ "type":"string", - "pattern":"[\\da-f]{8}-[\\da-f]{4}-4[\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}" + "pattern":"[\\da-f]{8}-[\\da-f]{4}-[45][\\da-f]{3}-[89ABab][\\da-f]{3}-[\\da-f]{12}" }, "UnauthorizedException":{ "type":"structure", diff --git a/botocore/data/s3control/2018-08-20/service-2.json b/botocore/data/s3control/2018-08-20/service-2.json index 9a18ab4c16..44f1e6eb95 100644 --- a/botocore/data/s3control/2018-08-20/service-2.json +++ b/botocore/data/s3control/2018-08-20/service-2.json @@ -1765,7 +1765,7 @@ }, "VpcConfiguration":{ "shape":"VpcConfiguration", - "documentation":"The virtual private cloud (VPC) configuration for this access point, if one exists.
This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other Amazon Web Servicesservices.
The virtual private cloud (VPC) configuration for this access point, if one exists.
This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other Amazon Web Services services.
Contains the virtual private cloud (VPC) configuration for the specified access point.
This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other Amazon Web Servicesservices.
Contains the virtual private cloud (VPC) configuration for the specified access point.
This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other Amazon Web Services services.
A list of S3 Storage Lens configurations.
" + "documentation":"A list of S3 Storage Lens configurations.
", + "locationName":"StorageLensConfiguration" } - } + }, + "locationName":"ListStorageLensConfigurationResult" }, "ListStorageLensGroupEntry":{ "type":"structure", @@ -5852,7 +5854,8 @@ }, "StorageLensGroupList":{ "shape":"StorageLensGroupList", - "documentation":"The list of Storage Lens groups that exist in the specified home Region.
" + "documentation":"The list of Storage Lens groups that exist in the specified home Region.
", + "locationName":"StorageLensGroup" } } }, @@ -6498,7 +6501,7 @@ }, "RestrictPublicBuckets":{ "shape":"Setting", - "documentation":"Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to TRUE
restricts access to buckets with public policies to only Amazon Web Servicesservice principals and authorized users within this account.
Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
This property is not supported for Amazon S3 on Outposts.
", + "documentation":"Specifies whether Amazon S3 should restrict public bucket policies for buckets in this account. Setting this element to TRUE
restricts access to buckets with public policies to only Amazon Web Services service principals and authorized users within this account.
Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
This property is not supported for Amazon S3 on Outposts.
", "locationName":"RestrictPublicBuckets" } }, diff --git a/botocore/data/verifiedpermissions/2021-12-01/service-2.json b/botocore/data/verifiedpermissions/2021-12-01/service-2.json index 5ec41e26d7..9c57215e87 100644 --- a/botocore/data/verifiedpermissions/2021-12-01/service-2.json +++ b/botocore/data/verifiedpermissions/2021-12-01/service-2.json @@ -15,6 +15,22 @@ "uid":"verifiedpermissions-2021-12-01" }, "operations":{ + "BatchGetPolicy":{ + "name":"BatchGetPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetPolicyInput"}, + "output":{"shape":"BatchGetPolicyOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Retrieves information about a group (batch) of policies.
The BatchGetPolicy
operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission verifiedpermissions:GetPolicy
in their IAM policies.
An attribute value of Record type.
Example: {\"record\": { \"keyName\": {} } }
An attribute value of ipaddr type.
Example: {\"ip\": \"192.168.1.100\"}
An attribute value of decimal type.
Example: {\"decimal\": \"1.1\"}
The value of an attribute.
Contains information about the runtime context for a request for which an authorization decision is made.
This data type is used as a member of the ContextDefinition structure which is used as a request parameter for the IsAuthorized, BatchIsAuthorized, and IsAuthorizedWithToken operations.
", @@ -574,6 +598,138 @@ "max":255, "min":1 }, + "BatchGetPolicyErrorCode":{ + "type":"string", + "enum":[ + "POLICY_STORE_NOT_FOUND", + "POLICY_NOT_FOUND" + ] + }, + "BatchGetPolicyErrorItem":{ + "type":"structure", + "required":[ + "code", + "policyStoreId", + "policyId", + "message" + ], + "members":{ + "code":{ + "shape":"BatchGetPolicyErrorCode", + "documentation":"The error code that was returned.
" + }, + "policyStoreId":{ + "shape":"String", + "documentation":"The identifier of the policy store associated with the failed request.
" + }, + "policyId":{ + "shape":"String", + "documentation":"The identifier of the policy associated with the failed request.
" + }, + "message":{ + "shape":"String", + "documentation":"A detailed error message.
" + } + }, + "documentation":"Contains the information about an error resulting from a BatchGetPolicy
API call.
An array of up to 100 policies you want information about.
" + } + } + }, + "BatchGetPolicyInputItem":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"The identifier of the policy store where the policy you want information about is stored.
" + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"The identifier of the policy you want information about.
" + } + }, + "documentation":"Information about a policy that you include in a BatchGetPolicy
API request.
Information about the policies listed in the request that were successfully returned. These results are returned in the order they were requested.
" + }, + "errors":{ + "shape":"BatchGetPolicyErrorList", + "documentation":"Information about the policies from the request that resulted in an error. These results are returned in the order they were requested.
" + } + } + }, + "BatchGetPolicyOutputItem":{ + "type":"structure", + "required":[ + "policyStoreId", + "policyId", + "policyType", + "definition", + "createdDate", + "lastUpdatedDate" + ], + "members":{ + "policyStoreId":{ + "shape":"PolicyStoreId", + "documentation":"The identifier of the policy store where the policy you want information about is stored.
" + }, + "policyId":{ + "shape":"PolicyId", + "documentation":"The identifier of the policy you want information about.
" + }, + "policyType":{ + "shape":"PolicyType", + "documentation":"The type of the policy. This is one of the following values:
STATIC
TEMPLATE_LINKED
The policy definition of an item in the list of policies returned.
" + }, + "createdDate":{ + "shape":"TimestampFormat", + "documentation":"The date and time the policy was created.
" + }, + "lastUpdatedDate":{ + "shape":"TimestampFormat", + "documentation":"The date and time the policy was most recently updated.
" + } + }, + "documentation":"Contains information about a policy returned from a BatchGetPolicy
API request.
A series of Allow
or Deny
decisions for each request, and the policies that produced them.
A series of Allow
or Deny
decisions for each request, and the policies that produced them. These results are returned in the order they were requested.
A series of Allow
or Deny
decisions for each request, and the policies that produced them.
A series of Allow
or Deny
decisions for each request, and the policies that produced them. These results are returned in the order they were requested.
The identifier of the PolicyStore where the policy you want information about is stored.
" + "documentation":"The identifier of the policy store where the policy you want information about is stored.
" }, "policyId":{ "shape":"PolicyId", @@ -2760,7 +2930,6 @@ }, "SchemaJson":{ "type":"string", - "max":100000, "min":1, "sensitive":true }, diff --git a/botocore/endpoint_provider.py b/botocore/endpoint_provider.py index 9439086c53..38b0a5ffe6 100644 --- a/botocore/endpoint_provider.py +++ b/botocore/endpoint_provider.py @@ -638,7 +638,7 @@ def process_input(self, value): return self.default if self.required: raise EndpointResolutionError( - f"Cannot find value for required parameter {self.name}" + msg=f"Cannot find value for required parameter {self.name}" ) # in all other cases, the parameter will keep the value None else: diff --git a/botocore/handlers.py b/botocore/handlers.py index d44cb07469..6d00ffadf1 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -1287,6 +1287,12 @@ def _update_status_code(response, **kwargs): http_response.status_code = parsed_status_code +def add_query_compatibility_header(model, params, **kwargs): + if not model.service_model.is_query_compatible: + return + params['headers']['x-amzn-query-mode'] = 'true' + + # This is a list of (event_name, handler). # When a Session is created, everything in this list will be # automatically registered with that Session. @@ -1348,6 +1354,7 @@ def _update_status_code(response, **kwargs): ('docs.response-params.s3.*.complete-section', document_expires_shape), ('before-endpoint-resolution.s3', customize_endpoint_resolver_builtins), ('before-call', add_recursion_detection_header), + ('before-call', add_query_compatibility_header), ('before-call.s3', add_expect_header), ('before-call.glacier', add_glacier_version), ('before-call.apigateway', add_accept_header), diff --git a/botocore/model.py b/botocore/model.py index 677266c8d2..70d20f8cca 100644 --- a/botocore/model.py +++ b/botocore/model.py @@ -478,6 +478,10 @@ def signature_version(self): def signature_version(self, value): self._signature_version = value + @CachedProperty + def is_query_compatible(self): + return 'awsQueryCompatible' in self.metadata + def __repr__(self): return f'{self.__class__.__name__}({self.service_name})' diff --git a/docs/source/conf.py b/docs/source/conf.py index 9f9f8d84f4..bd2e28dcf3 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.54' +release = '1.35.55' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/tests/functional/test_sqs.py b/tests/functional/test_sqs.py index bde70c86ed..b1e39b2944 100644 --- a/tests/functional/test_sqs.py +++ b/tests/functional/test_sqs.py @@ -46,3 +46,11 @@ def test_query_compatible_error_parsing(self): self.client.delete_queue( QueueUrl="not-a-real-queue-botocore", ) + + def test_query_compatibility_mode_header_sent(self): + with self.http_stubber as stub: + stub.add_response() + self.client.delete_queue(QueueUrl="not-a-real-queue-botocore") + request = self.http_stubber.requests[0] + assert 'x-amzn-query-mode' in request.headers + assert request.headers['x-amzn-query-mode'] == b'true' diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py index 924abed8af..b26b4b92b2 100644 --- a/tests/unit/test_handlers.py +++ b/tests/unit/test_handlers.py @@ -1944,3 +1944,20 @@ def test_document_response_params_without_expires(document_expires_mocks): mocks['section'].get_section.assert_not_called() mocks['param_section'].add_new_section.assert_not_called() mocks['doc_section'].write.assert_not_called() + + +def test_add_query_compatibility_header(): + service_model = ServiceModel({'metadata': {'awsQueryCompatible': {}}}) + operation_model = OperationModel(mock.Mock(), service_model) + request_dict = {'headers': {}} + handlers.add_query_compatibility_header(operation_model, request_dict) + assert 'x-amzn-query-mode' in request_dict['headers'] + assert request_dict['headers']['x-amzn-query-mode'] == 'true' + + +def test_does_not_add_query_compatibility_header(): + service_model = ServiceModel({'metadata': {}}) + operation_model = OperationModel(mock.Mock(), service_model) + request_dict = {'headers': {}} + handlers.add_query_compatibility_header(operation_model, request_dict) + assert 'x-amzn-query-mode' not in request_dict['headers']