diff --git a/.changes/1.35.73.json b/.changes/1.35.73.json new file mode 100644 index 0000000000..8cc093c5fc --- /dev/null +++ b/.changes/1.35.73.json @@ -0,0 +1,17 @@ +[ + { + "category": "``bedrock-runtime``", + "description": "Add an API parameter that allows customers to set performance configuration for invoking a model.", + "type": "api-change" + }, + { + "category": "``s3control``", + "description": "It allows customers to pass CRC64NVME as a header in S3 Batch Operations copy requests", + "type": "api-change" + }, + { + "category": "``socialmessaging``", + "description": "Added support for passing role arn corresponding to the supported event destination", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e50bd60d13..b6c2168a13 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,14 @@ CHANGELOG ========= +1.35.73 +======= + +* api-change:``bedrock-runtime``: Add an API parameter that allows customers to set performance configuration for invoking a model. +* api-change:``s3control``: It allows customers to pass CRC64NVME as a header in S3 Batch Operations copy requests +* api-change:``socialmessaging``: Added support for passing role arn corresponding to the supported event destination + + 1.35.72 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index ab1c918e1a..cd55779387 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.72' +__version__ = '1.35.73' class NullHandler(logging.Handler): diff --git a/botocore/data/bedrock-runtime/2023-09-30/service-2.json b/botocore/data/bedrock-runtime/2023-09-30/service-2.json index c866a01cad..5ef0e9202e 100644 --- a/botocore/data/bedrock-runtime/2023-09-30/service-2.json +++ b/botocore/data/bedrock-runtime/2023-09-30/service-2.json @@ -52,7 +52,7 @@ {"shape":"ModelNotReadyException"}, {"shape":"ModelErrorException"} ], - "documentation":"
Sends messages to the specified Amazon Bedrock model. Converse
provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. If a model has unique inference parameters, you can also pass those unique parameters to the model.
Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.
You can submit a prompt by including it in the messages
field, specifying the modelId
of a foundation model or inference profile to run inference on it, and including any other fields that are relevant to your use case.
You can also submit a prompt from Prompt management by specifying the ARN of the prompt version and including a map of variables to values in the promptVariables
field. You can append more messages to the prompt by using the messages
field. If you use a prompt from Prompt management, you can't include the following fields in the request: additionalModelRequestFields
, inferenceConfig
, system
, or toolConfig
. Instead, these fields must be defined through Prompt management. For more information, see Use a prompt from Prompt management.
For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide
For example code, see Converse API examples in the Amazon Bedrock User Guide.
This operation requires permission for the bedrock:InvokeModel
action.
Sends messages to the specified Amazon Bedrock model. Converse
provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. If a model has unique inference parameters, you can also pass those unique parameters to the model.
Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.
You can submit a prompt by including it in the messages
field, specifying the modelId
of a foundation model or inference profile to run inference on it, and including any other fields that are relevant to your use case.
You can also submit a prompt from Prompt management by specifying the ARN of the prompt version and including a map of variables to values in the promptVariables
field. You can append more messages to the prompt by using the messages
field. If you use a prompt from Prompt management, you can't include the following fields in the request: additionalModelRequestFields
, inferenceConfig
, system
, or toolConfig
. Instead, these fields must be defined through Prompt management. For more information, see Use a prompt from Prompt management.
For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide
For example code, see Converse API examples in the Amazon Bedrock User Guide.
This operation requires permission for the bedrock:InvokeModel
action.
To deny all inference access to resources that you specify in the modelId field, you need to deny access to the bedrock:InvokeModel
and bedrock:InvokeModelWithResponseStream
actions. Doing this also denies access to the resource through the base inference actions (InvokeModel and InvokeModelWithResponseStream). For more information, see Deny access for inference on specific models.
For troubleshooting some of the common errors you might encounter when using the Converse
API, see Troubleshooting Amazon Bedrock API Error Codes in the Amazon Bedrock User Guide
Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream
provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model.
To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported
field in the response.
The CLI doesn't support streaming operations in Amazon Bedrock, including ConverseStream
.
Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.
You can submit a prompt by including it in the messages
field, specifying the modelId
of a foundation model or inference profile to run inference on it, and including any other fields that are relevant to your use case.
You can also submit a prompt from Prompt management by specifying the ARN of the prompt version and including a map of variables to values in the promptVariables
field. You can append more messages to the prompt by using the messages
field. If you use a prompt from Prompt management, you can't include the following fields in the request: additionalModelRequestFields
, inferenceConfig
, system
, or toolConfig
. Instead, these fields must be defined through Prompt management. For more information, see Use a prompt from Prompt management.
For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide
For example code, see Conversation streaming example in the Amazon Bedrock User Guide.
This operation requires permission for the bedrock:InvokeModelWithResponseStream
action.
Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream
provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model.
To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported
field in the response.
The CLI doesn't support streaming operations in Amazon Bedrock, including ConverseStream
.
Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response.
You can submit a prompt by including it in the messages
field, specifying the modelId
of a foundation model or inference profile to run inference on it, and including any other fields that are relevant to your use case.
You can also submit a prompt from Prompt management by specifying the ARN of the prompt version and including a map of variables to values in the promptVariables
field. You can append more messages to the prompt by using the messages
field. If you use a prompt from Prompt management, you can't include the following fields in the request: additionalModelRequestFields
, inferenceConfig
, system
, or toolConfig
. Instead, these fields must be defined through Prompt management. For more information, see Use a prompt from Prompt management.
For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide
For example code, see Conversation streaming example in the Amazon Bedrock User Guide.
This operation requires permission for the bedrock:InvokeModelWithResponseStream
action.
To deny all inference access to resources that you specify in the modelId field, you need to deny access to the bedrock:InvokeModel
and bedrock:InvokeModelWithResponseStream
actions. Doing this also denies access to the resource through the base inference actions (InvokeModel and InvokeModelWithResponseStream). For more information, see Deny access for inference on specific models.
For troubleshooting some of the common errors you might encounter when using the ConverseStream
API, see Troubleshooting Amazon Bedrock API Error Codes in the Amazon Bedrock User Guide
Invokes the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. You use model inference to generate text, images, and embeddings.
For example code, see Invoke model code examples in the Amazon Bedrock User Guide.
This operation requires permission for the bedrock:InvokeModel
action.
Invokes the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. You use model inference to generate text, images, and embeddings.
For example code, see Invoke model code examples in the Amazon Bedrock User Guide.
This operation requires permission for the bedrock:InvokeModel
action.
To deny all inference access to resources that you specify in the modelId field, you need to deny access to the bedrock:InvokeModel
and bedrock:InvokeModelWithResponseStream
actions. Doing this also denies access to the resource through the Converse API actions (Converse and ConverseStream). For more information, see Deny access for inference on specific models.
For troubleshooting some of the common errors you might encounter when using the InvokeModel
API, see Troubleshooting Amazon Bedrock API Error Codes in the Amazon Bedrock User Guide
Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.
To see if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported
field in the response.
The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream
.
For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide.
This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream
action.
Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.
To see if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported
field in the response.
The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream
.
For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide.
This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream
action.
To deny all inference access to resources that you specify in the modelId field, you need to deny access to the bedrock:InvokeModel
and bedrock:InvokeModelWithResponseStream
actions. Doing this also denies access to the resource through the Converse API actions (Converse and ConverseStream). For more information, see Deny access for inference on specific models.
For troubleshooting some of the common errors you might encounter when using the InvokeModelWithResponseStream
API, see Troubleshooting Amazon Bedrock API Error Codes in the Amazon Bedrock User Guide
The request is denied because of missing access permissions.
", + "documentation":"The request is denied because you do not have sufficient permissions to perform the requested action. For troubleshooting this error, see AccessDeniedException in the Amazon Bedrock User Guide
", "error":{ "httpStatusCode":403, "senderFault":true @@ -369,7 +369,7 @@ "members":{ "modelId":{ "shape":"ConversationalModelId", - "documentation":"Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
To include a prompt that was defined in Prompt management, specify the ARN of the prompt version to use.
The Converse API doesn't support imported models.
", + "documentation":"Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
To include a prompt that was defined in Prompt management, specify the ARN of the prompt version to use.
The Converse API doesn't support imported models.
", "location":"uri", "locationName":"modelId" }, @@ -404,6 +404,10 @@ "additionalModelResponseFieldPaths":{ "shape":"ConverseRequestAdditionalModelResponseFieldPathsList", "documentation":"Additional model parameters field paths to return in the response. Converse
and ConverseStream
return the requested fields as a JSON Pointer object in the additionalModelResponseFields
field. The following is example JSON for additionalModelResponseFieldPaths
.
[ \"/stop_sequence\" ]
For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation.
Converse
and ConverseStream
reject an empty JSON Pointer or incorrectly structured JSON Pointer with a 400
error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse
.
Model performance settings for the request.
" } } }, @@ -450,6 +454,10 @@ "trace":{ "shape":"ConverseTrace", "documentation":"A trace object that contains information about the Guardrail behavior.
" + }, + "performanceConfig":{ + "shape":"PerformanceConfiguration", + "documentation":"Model performance settings for the request.
" } } }, @@ -471,6 +479,10 @@ "trace":{ "shape":"ConverseStreamTrace", "documentation":"The trace object in the response from ConverseStream that contains information about the guardrail behavior.
" + }, + "performanceConfig":{ + "shape":"PerformanceConfiguration", + "documentation":"Model performance configuration metadata for the conversation stream event.
" } }, "documentation":"A conversation stream metadata event.
", @@ -532,7 +544,7 @@ }, "serviceUnavailableException":{ "shape":"ServiceUnavailableException", - "documentation":"The service isn't currently available. Try again later.
" + "documentation":"The service isn't currently available. For troubleshooting this error, see ServiceUnavailable in the Amazon Bedrock User Guide
" } }, "documentation":"The messages output stream
", @@ -544,7 +556,7 @@ "members":{ "modelId":{ "shape":"ConversationalModelId", - "documentation":"Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
To include a prompt that was defined in Prompt management, specify the ARN of the prompt version to use.
The Converse API doesn't support imported models.
", + "documentation":"Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
To include a prompt that was defined in Prompt management, specify the ARN of the prompt version to use.
The Converse API doesn't support imported models.
", "location":"uri", "locationName":"modelId" }, @@ -579,6 +591,10 @@ "additionalModelResponseFieldPaths":{ "shape":"ConverseStreamRequestAdditionalModelResponseFieldPathsList", "documentation":"Additional model parameters field paths to return in the response. Converse
and ConverseStream
return the requested fields as a JSON Pointer object in the additionalModelResponseFields
field. The following is example JSON for additionalModelResponseFieldPaths
.
[ \"/stop_sequence\" ]
For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation.
Converse
and ConverseStream
reject an empty JSON Pointer or incorrectly structured JSON Pointer with a 400
error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse
.
Model performance settings for the request.
" } } }, @@ -1508,7 +1524,7 @@ "members":{ "message":{"shape":"NonBlankString"} }, - "documentation":"An internal server error occurred. Retry your request.
", + "documentation":"An internal server error occurred. For troubleshooting this error, see InternalFailure in the Amazon Bedrock User Guide
", "error":{"httpStatusCode":500}, "exception":true, "fault":true @@ -1541,7 +1557,7 @@ }, "modelId":{ "shape":"InvokeModelIdentifier", - "documentation":"The unique identifier of the model to invoke to run inference.
The modelId
to provide depends on the type of model that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
If you use an imported model, specify the ARN of the imported model. You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console.
The unique identifier of the model to invoke to run inference.
The modelId
to provide depends on the type of model or throughput that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
If you use an imported model, specify the ARN of the imported model. You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console.
The version number for the guardrail. The value can also be DRAFT
.
Model performance settings for the request.
", + "location":"header", + "locationName":"X-Amzn-Bedrock-PerformanceConfig-Latency" } }, "payload":"body" @@ -1582,6 +1604,12 @@ "documentation":"The MIME type of the inference result.
", "location":"header", "locationName":"Content-Type" + }, + "performanceConfigLatency":{ + "shape":"PerformanceConfigLatency", + "documentation":"Model performance settings for the request.
", + "location":"header", + "locationName":"X-Amzn-Bedrock-PerformanceConfig-Latency" } }, "payload":"body" @@ -1608,7 +1636,7 @@ }, "modelId":{ "shape":"InvokeModelIdentifier", - "documentation":"The unique identifier of the model to invoke to run inference.
The modelId
to provide depends on the type of model that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
If you use an imported model, specify the ARN of the imported model. You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console.
The unique identifier of the model to invoke to run inference.
The modelId
to provide depends on the type of model or throughput that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
If you use an imported model, specify the ARN of the imported model. You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console.
The version number for the guardrail. The value can also be DRAFT
.
Model performance settings for the request.
", + "location":"header", + "locationName":"X-Amzn-Bedrock-PerformanceConfig-Latency" } }, "payload":"body" @@ -1649,6 +1683,12 @@ "documentation":"The MIME type of the inference result.
", "location":"header", "locationName":"X-Amzn-Bedrock-Content-Type" + }, + "performanceConfigLatency":{ + "shape":"PerformanceConfigLatency", + "documentation":"Model performance settings for the request.
", + "location":"header", + "locationName":"X-Amzn-Bedrock-PerformanceConfig-Latency" } }, "payload":"body" @@ -1808,6 +1848,23 @@ "event":true, "sensitive":true }, + "PerformanceConfigLatency":{ + "type":"string", + "enum":[ + "standard", + "optimized" + ] + }, + "PerformanceConfiguration":{ + "type":"structure", + "members":{ + "latency":{ + "shape":"PerformanceConfigLatency", + "documentation":"To use a latency-optimized version of the model, set to optimized
.
Performance settings for a model.
" + }, "PromptVariableMap":{ "type":"map", "key":{"shape":"String"}, @@ -1830,7 +1887,7 @@ "members":{ "message":{"shape":"NonBlankString"} }, - "documentation":"The specified resource ARN was not found. Check the ARN and try your request again.
", + "documentation":"The specified resource ARN was not found. For troubleshooting this error, see ResourceNotFound in the Amazon Bedrock User Guide
", "error":{ "httpStatusCode":404, "senderFault":true @@ -1864,7 +1921,10 @@ "shape":"ModelTimeoutException", "documentation":"The request took too long to process. Processing time exceeded the model timeout length.
" }, - "serviceUnavailableException":{"shape":"ServiceUnavailableException"} + "serviceUnavailableException":{ + "shape":"ServiceUnavailableException", + "documentation":"The service isn't available. Try again later.
" + } }, "documentation":"Definition of content in the response stream.
", "eventstream":true @@ -1886,7 +1946,7 @@ "members":{ "message":{"shape":"NonBlankString"} }, - "documentation":"The service isn't currently available. Try again later.
", + "documentation":"The service isn't currently available. For troubleshooting this error, see ServiceUnavailable in the Amazon Bedrock User Guide
", "error":{"httpStatusCode":503}, "exception":true, "fault":true @@ -1952,7 +2012,7 @@ "members":{ "message":{"shape":"NonBlankString"} }, - "documentation":"Your request was throttled because of service-wide limitations. Resubmit your request later or in a different region. You can also purchase Provisioned Throughput to increase the rate or number of tokens you can process.
", + "documentation":"Your request was denied due to exceeding the account quotas for Amazon Bedrock. For troubleshooting this error, see ThrottlingException in the Amazon Bedrock User Guide
", "error":{ "httpStatusCode":429, "senderFault":true @@ -2212,7 +2272,7 @@ "members":{ "message":{"shape":"NonBlankString"} }, - "documentation":"Input validation failed. Check your request parameters and retry the request.
", + "documentation":"The input fails to satisfy the constraints specified by Amazon Bedrock. For troubleshooting this error, see ValidationError in the Amazon Bedrock User Guide
", "error":{ "httpStatusCode":400, "senderFault":true diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 05725829a5..1cbdc68d02 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -13665,8 +13665,32 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "metrics-fips.sagemaker.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "metrics-fips.sagemaker.ca-central-1.amazonaws.com" + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "metrics-fips.sagemaker.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1-fips" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "metrics-fips.sagemaker.ca-west-1.amazonaws.com" + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -13679,10 +13703,58 @@ "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "metrics-fips.sagemaker.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "metrics-fips.sagemaker.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "metrics-fips.sagemaker.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "metrics-fips.sagemaker.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "metrics-fips.sagemaker.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + 
"credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "metrics-fips.sagemaker.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "metrics-fips.sagemaker.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "metrics-fips.sagemaker.us-west-2.amazonaws.com" + } } }, "mgh" : { @@ -20936,8 +21008,34 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-central-1-fips" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.ca-central-1.amazonaws.com", + "protocols" : [ "https" ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1-fips" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.ca-west-1.amazonaws.com", + "protocols" : [ "https" ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -20957,10 +21055,62 @@ "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.us-east-1.amazonaws.com", + "protocols" : [ "https" ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" 
: { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.us-east-2.amazonaws.com", + "protocols" : [ "https" ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.us-west-1.amazonaws.com", + "protocols" : [ "https" ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.us-west-2.amazonaws.com", + "protocols" : [ "https" ] + } } }, "sts" : { @@ -23888,8 +24038,18 @@ }, "elasticbeanstalk" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "elasticbeanstalk.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "elasticbeanstalk.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "elasticfilesystem" : { @@ -29722,7 +29882,7 @@ "endpoints" : { "us-gov-east-1" : { "variants" : [ { - "hostname" : "streams.dynamodb.us-gov-east-1.amazonaws.com", + "hostname" : "streams.dynamodb-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] } ] }, @@ -29731,11 +29891,12 @@ "region" : "us-gov-east-1" }, "deprecated" : true, - "hostname" : "streams.dynamodb.us-gov-east-1.amazonaws.com" + "hostname" : "streams.dynamodb-fips.us-gov-east-1.amazonaws.com", + "protocols" : [ "https" ] }, "us-gov-west-1" : { "variants" : [ { - "hostname" : "streams.dynamodb.us-gov-west-1.amazonaws.com", + "hostname" : "streams.dynamodb-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] } ] }, @@ -29744,7 +29905,8 @@ 
"region" : "us-gov-west-1" }, "deprecated" : true, - "hostname" : "streams.dynamodb.us-gov-west-1.amazonaws.com" + "hostname" : "streams.dynamodb-fips.us-gov-west-1.amazonaws.com", + "protocols" : [ "https" ] } } }, @@ -31087,8 +31249,34 @@ } }, "endpoints" : { - "us-iso-east-1" : { }, - "us-iso-west-1" : { } + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-east-1-fips" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov", + "protocols" : [ "https" ] + }, + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-west-1-fips" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov", + "protocols" : [ "https" ] + } } }, "sts" : { @@ -31801,7 +31989,20 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { - "us-isob-east-1" : { } + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "streams.dynamodb-fips.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + }, + "us-isob-east-1-fips" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "streams.dynamodb-fips.us-isob-east-1.sc2s.sgov.gov", + "protocols" : [ "https" ] + } } }, "sts" : { diff --git a/botocore/data/s3control/2018-08-20/service-2.json b/botocore/data/s3control/2018-08-20/service-2.json index 9b41eb0659..f7f4126612 100644 --- a/botocore/data/s3control/2018-08-20/service-2.json +++ b/botocore/data/s3control/2018-08-20/service-2.json @@ -7395,7 +7395,8 @@ "CRC32", "CRC32C", "SHA1", - "SHA256" + "SHA256", + "CRC64NVME" ] }, "S3ContentLength":{ @@ -7407,7 +7408,7 @@ "members":{ "TargetResource":{ "shape":"S3RegionalOrS3ExpressBucketArnString", - "documentation":"Specifies the 
destination bucket Amazon Resource Name (ARN) for the batch copy operation.
General purpose buckets - For example, to copy objects to a general purpose bucket named destinationBucket
, set the TargetResource
property to arn:aws:s3:::destinationBucket
.
Directory buckets - For example, to copy objects to a directory bucket named destinationBucket
in the Availability Zone identified by the AZ ID usw2-az1
, set the TargetResource
property to arn:aws:s3express:region:account_id:/bucket/destination_bucket_base_name--usw2-az1--x-s3
. A directory bucket as a destination bucket can be in Availability Zone or Local Zone.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, you get an HTTP 400 Bad Request
error with the error code InvalidRequest
.
Specifies the destination bucket Amazon Resource Name (ARN) for the batch copy operation.
General purpose buckets - For example, to copy objects to a general purpose bucket named destinationBucket
, set the TargetResource
property to arn:aws:s3:::destinationBucket
.
Directory buckets - For example, to copy objects to a directory bucket named destinationBucket
in the Availability Zone identified by the AZ ID usw2-az1
, set the TargetResource
property to arn:aws:s3express:region:account_id:/bucket/destination_bucket_base_name--usw2-az1--x-s3
.
The details for unregistered WhatsApp phone numbers.
" + }, + "wabaId":{ + "shape":"LinkedWhatsAppBusinessAccountId", + "documentation":"The Amazon Resource Name (ARN) of the WhatsApp Business Account ID.
" } }, "documentation":"Contains your WhatsApp registration status and details of any unregistered WhatsApp phone number.
" @@ -800,6 +804,10 @@ }, "exception":true }, + "RoleArn":{ + "type":"string", + "pattern":"arn:aws:iam::\\d{12}:role\\/[a-zA-Z0-9+=,.@\\-_]+" + }, "S3File":{ "type":"structure", "required":[ @@ -1058,6 +1066,10 @@ "eventDestinationArn":{ "shape":"EventDestinationArn", "documentation":"The ARN of the event destination.
" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"The Amazon Resource Name (ARN) of an Identity and Access Management role that is able to import phone numbers and write events.
" } }, "documentation":"Contains information on the event destination.
" diff --git a/docs/source/conf.py b/docs/source/conf.py index 740052a915..a49fbbf691 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.72' +release = '1.35.73' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.