diff --git a/.changes/1.35.32.json b/.changes/1.35.32.json new file mode 100644 index 0000000000..393c404ad6 --- /dev/null +++ b/.changes/1.35.32.json @@ -0,0 +1,47 @@ +[ + { + "category": "``appstream``", + "description": "Added support for Automatic Time Zone Redirection on Amazon AppStream 2.0", + "type": "api-change" + }, + { + "category": "``b2bi``", + "description": "Added and updated APIs to support outbound EDI transformations", + "type": "api-change" + }, + { + "category": "``bedrock-agent-runtime``", + "description": "Added raw model response and usage metrics to PreProcessing and PostProcessing Trace", + "type": "api-change" + }, + { + "category": "``bedrock-runtime``", + "description": "Added new fields to Amazon Bedrock Guardrails trace", + "type": "api-change" + }, + { + "category": "``iotdeviceadvisor``", + "description": "Add clientToken attribute and implement idempotency for CreateSuiteDefinition.", + "type": "api-change" + }, + { + "category": "``ivs-realtime``", + "description": "Adds new Stage Health EventErrorCodes applicable to RTMP(S) broadcasts. 
Bug Fix: Enforces that EncoderConfiguration Video height and width must be even-number values.", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "This release introduces a header representing the minimum object size limit for Lifecycle transitions.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "releasing builtinlcc to public", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "WSP is being rebranded to become DCV.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d7e6ff25e5..767de1b85c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,20 @@ CHANGELOG ========= +1.35.32 +======= + +* api-change:``appstream``: Added support for Automatic Time Zone Redirection on Amazon AppStream 2.0 +* api-change:``b2bi``: Added and updated APIs to support outbound EDI transformations +* api-change:``bedrock-agent-runtime``: Added raw model response and usage metrics to PreProcessing and PostProcessing Trace +* api-change:``bedrock-runtime``: Added new fields to Amazon Bedrock Guardrails trace +* api-change:``iotdeviceadvisor``: Add clientToken attribute and implement idempotency for CreateSuiteDefinition. +* api-change:``ivs-realtime``: Adds new Stage Health EventErrorCodes applicable to RTMP(S) broadcasts. Bug Fix: Enforces that EncoderConfiguration Video height and width must be even-number values. +* api-change:``s3``: This release introduces a header representing the minimum object size limit for Lifecycle transitions. +* api-change:``sagemaker``: releasing builtinlcc to public +* api-change:``workspaces``: WSP is being rebranded to become DCV. 
+ + 1.35.31 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 65f85cf694..de4327ba86 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.31' +__version__ = '1.35.32' class NullHandler(logging.Handler): diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index 7e7b9238fc..03b7cf15a6 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -1298,7 +1298,8 @@ "FILE_DOWNLOAD", "PRINTING_TO_LOCAL_DEVICE", "DOMAIN_PASSWORD_SIGNIN", - "DOMAIN_SMART_CARD_SIGNIN" + "DOMAIN_SMART_CARD_SIGNIN", + "AUTO_TIME_ZONE_REDIRECTION" ] }, "AppBlock":{ diff --git a/botocore/data/b2bi/2022-06-23/service-2.json b/botocore/data/b2bi/2022-06-23/service-2.json index 85839cbfba..9b4e349d7f 100644 --- a/botocore/data/b2bi/2022-06-23/service-2.json +++ b/botocore/data/b2bi/2022-06-23/service-2.json @@ -76,6 +76,22 @@ "documentation":"
Creates a customer profile. You can have up to five customer profiles, each representing a distinct private network. A profile is the mechanism used to create the concept of a private network.
", "idempotent":true }, + "CreateStarterMappingTemplate":{ + "name":"CreateStarterMappingTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateStarterMappingTemplateRequest"}, + "output":{"shape":"CreateStarterMappingTemplateResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Amazon Web Services B2B Data Interchange uses a mapping template in JSONata or XSLT format to transform a customer input file into a JSON or XML file that can be converted to EDI.
If you provide a sample EDI file with the same structure as the EDI files that you wish to generate, then the service can generate a mapping template. The starter template contains placeholder values which you can replace with JSONata or XSLT expressions to take data from your input file and insert it into the JSON or XML file that is used to generate the EDI.
If you do not provide a sample EDI file, then the service can generate a mapping template based on the EDI settings in the templateDetails
parameter.
Currently, we only support generating a template that can generate the input to produce an Outbound X12 EDI file.
" + }, "CreateTransformer":{ "name":"CreateTransformer", "http":{ @@ -93,7 +109,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"Creates a transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file.
", + "documentation":"Creates a transformer. Amazon Web Services B2B Data Interchange currently supports two scenarios:
Inbound EDI: the Amazon Web Services customer receives an EDI file from their trading partner. Amazon Web Services B2B Data Interchange converts this EDI file into a JSON or XML file with a service-defined structure. A mapping template provided by the customer, in JSONata or XSLT format, is optionally applied to this file to produce a JSON or XML file with the structure the customer requires.
Outbound EDI: the Amazon Web Services customer has a JSON or XML file containing data that they wish to use in an EDI file. A mapping template, provided by the customer (in either JSONata or XSLT format) is applied to this file to generate a JSON or XML file in the service-defined structure. This file is then converted to an EDI file.
The following fields are provided for backwards compatibility only: fileFormat
, mappingTemplate
, ediType
, and sampleDocument
.
Use the mapping
data type in place of mappingTemplate
and fileFormat
Use the sampleDocuments
data type in place of sampleDocument
Use either the inputConversion
or outputConversion
in place of ediType
Deletes the specified transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file.
", + "documentation":"Deletes the specified transformer. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file.
", "idempotent":true }, "GetCapability":{ @@ -234,7 +250,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"Retrieves the details for the transformer specified by the transformer ID. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file.
" + "documentation":"Retrieves the details for the transformer specified by the transformer ID. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file.
" }, "GetTransformerJob":{ "name":"GetTransformerJob", @@ -331,7 +347,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"Lists the available transformers. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file.
" + "documentation":"Lists the available transformers. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file.
" }, "StartTransformerJob":{ "name":"StartTransformerJob", @@ -349,7 +365,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2BI Data Interchange.
If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just create and configure a transformer, and then run the StartTransformerJob
API to process your files.
Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2B Data Interchange.
If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just create and configure a transformer, and then run the StartTransformerJob
API to process your files.
Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are capability, partnership, profile, transformers and other entities.
There is no response returned from this call.
" }, + "TestConversion":{ + "name":"TestConversion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestConversionRequest"}, + "output":{"shape":"TestConversionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"This operation mimics the latter half of a typical Outbound EDI request. It takes an input JSON/XML in the B2Bi shape as input, converts it to an X12 EDI string, and return that string.
", + "idempotent":true + }, "TestMapping":{ "name":"TestMapping", "http":{ @@ -495,7 +529,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"Updates the specified parameters for a transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file.
", + "documentation":"Updates the specified parameters for a transformer. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file.
", "idempotent":true } }, @@ -535,6 +569,13 @@ "documentation":"A capability object. Currently, only EDI (electronic data interchange) capabilities are supported. A trading capability contains the information required to transform incoming EDI documents into JSON or XML outputs.
", "union":true }, + "CapabilityDirection":{ + "type":"string", + "enum":[ + "INBOUND", + "OUTBOUND" + ] + }, "CapabilityId":{ "type":"string", "max":64, @@ -550,6 +591,16 @@ "max":254, "min":1 }, + "CapabilityOptions":{ + "type":"structure", + "members":{ + "outboundEdi":{ + "shape":"OutboundEdiOptions", + "documentation":"A structure that contains the outbound EDI options.
" + } + }, + "documentation":"Contains the details for an Outbound EDI capability.
" + }, "CapabilitySummary":{ "type":"structure", "required":[ @@ -595,6 +646,62 @@ "documentation":"A conflict exception is thrown when you attempt to delete a resource (such as a profile or a capability) that is being used by other resources.
", "exception":true }, + "ConversionSource":{ + "type":"structure", + "required":[ + "fileFormat", + "inputFile" + ], + "members":{ + "fileFormat":{ + "shape":"ConversionSourceFormat", + "documentation":"The format for the input file: either JSON or XML.
" + }, + "inputFile":{ + "shape":"InputFileSource", + "documentation":"File to be converted
" + } + }, + "documentation":"Describes the input for an outbound transformation.
" + }, + "ConversionSourceFormat":{ + "type":"string", + "enum":[ + "JSON", + "XML" + ] + }, + "ConversionTarget":{ + "type":"structure", + "required":["fileFormat"], + "members":{ + "fileFormat":{ + "shape":"ConversionTargetFormat", + "documentation":"Currently, only X12 format is supported.
" + }, + "formatDetails":{ + "shape":"ConversionTargetFormatDetails", + "documentation":"A structure that contains the formatting details for the conversion target.
" + }, + "outputSampleFile":{ + "shape":"OutputSampleFileSource", + "documentation":"Customer uses this to provide a sample on what should file look like after conversion X12 EDI use case around this would be discovering the file syntax
" + } + }, + "documentation":"Provide a sample of what the output of the transformation should look like.
" + }, + "ConversionTargetFormat":{ + "type":"string", + "enum":["X12"] + }, + "ConversionTargetFormatDetails":{ + "type":"structure", + "members":{ + "x12":{"shape":"X12Details"} + }, + "documentation":"Contains a structure describing the X12 details for the conversion target.
", + "union":true + }, "CreateCapabilityRequest":{ "type":"structure", "required":[ @@ -700,6 +807,10 @@ "shape":"PartnershipCapabilities", "documentation":"Specifies a list of the capabilities associated with this partnership.
" }, + "capabilityOptions":{ + "shape":"CapabilityOptions", + "documentation":"Specify the structure that contains the details for the associated capabilities.
" + }, "clientToken":{ "shape":"String", "documentation":"Reserved for future use.
", @@ -748,6 +859,10 @@ "shape":"PartnershipCapabilities", "documentation":"Returns one or more capabilities associated with this partnership.
" }, + "capabilityOptions":{ + "shape":"CapabilityOptions", + "documentation":"Returns the structure that contains the details for the associated capabilities.
" + }, "tradingPartnerId":{ "shape":"TradingPartnerId", "documentation":"Returns the unique, system-generated identifier for a trading partner.
" @@ -847,43 +962,93 @@ } } }, - "CreateTransformerRequest":{ + "CreateStarterMappingTemplateRequest":{ "type":"structure", "required":[ - "name", - "fileFormat", - "mappingTemplate", - "ediType" + "mappingType", + "templateDetails" ], + "members":{ + "outputSampleLocation":{ + "shape":"S3Location", + "documentation":"Specify the location of the sample EDI file that is used to generate the mapping template.
" + }, + "mappingType":{ + "shape":"MappingType", + "documentation":"Specify the format for the mapping template: either JSONATA or XSLT.
" + }, + "templateDetails":{ + "shape":"TemplateDetails", + "documentation":"Describes the details needed for generating the template. Specify the X12 transaction set and version for which the template is used: currently, we only support X12.
" + } + } + }, + "CreateStarterMappingTemplateResponse":{ + "type":"structure", + "required":["mappingTemplate"], + "members":{ + "mappingTemplate":{ + "shape":"String", + "documentation":"Returns a string that represents the mapping template.
" + } + } + }, + "CreateTransformerRequest":{ + "type":"structure", + "required":["name"], "members":{ "name":{ "shape":"TransformerName", "documentation":"Specifies the name of the transformer, used to identify it.
" }, + "clientToken":{ + "shape":"String", + "documentation":"Reserved for future use.
", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagList", + "documentation":"Specifies the key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (capabilities, partnerships, and so on) for any purpose.
" + }, "fileFormat":{ "shape":"FileFormat", - "documentation":"Specifies that the currently supported file formats for EDI transformations are JSON
and XML
.
Specifies that the currently supported file formats for EDI transformations are JSON
and XML
.
Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
" + "documentation":"Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
This parameter is available for backwards compatibility. Use the Mapping data type instead.
Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
" + "documentation":"Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "sampleDocument":{ "shape":"FileLocation", - "documentation":"Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data.
" + "documentation":"Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, - "clientToken":{ - "shape":"String", - "documentation":"Reserved for future use.
", - "idempotencyToken":true + "inputConversion":{ + "shape":"InputConversion", + "documentation":"Specify the InputConversion
object, which contains the format options for the inbound transformation.
Specifies the key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (capabilities, partnerships, and so on) for any purpose.
" + "mapping":{ + "shape":"Mapping", + "documentation":"Specify the structure that contains the mapping template and its language (either XSLT or JSONATA).
" + }, + "outputConversion":{ + "shape":"OutputConversion", + "documentation":"A structure that contains the OutputConversion
object, which contains the format options for the outbound transformation.
Specify a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.
" } } }, @@ -893,10 +1058,7 @@ "transformerId", "transformerArn", "name", - "fileFormat", - "mappingTemplate", "status", - "ediType", "createdAt" ], "members":{ @@ -912,29 +1074,53 @@ "shape":"TransformerName", "documentation":"Returns the name of the transformer, used to identify it.
" }, + "status":{ + "shape":"TransformerStatus", + "documentation":"Returns the state of the newly created transformer. The transformer can be either active
or inactive
. For the transformer to be used in a capability, its status must active
.
Returns a timestamp for creation date and time of the transformer.
" + }, "fileFormat":{ "shape":"FileFormat", - "documentation":"Returns that the currently supported file formats for EDI transformations are JSON
and XML
.
Returns that the currently supported file formats for EDI transformations are JSON
and XML
.
Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
" - }, - "status":{ - "shape":"TransformerStatus", - "documentation":"Returns the state of the newly created transformer. The transformer can be either active
or inactive
. For the transformer to be used in a capability, its status must active
.
Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "ediType":{ "shape":"EdiType", - "documentation":"Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
" + "documentation":"Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "sampleDocument":{ "shape":"FileLocation", - "documentation":"Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.
" + "documentation":"Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, - "createdAt":{ - "shape":"CreatedDate", - "documentation":"Returns a timestamp for creation date and time of the transformer.
" + "inputConversion":{ + "shape":"InputConversion", + "documentation":"Returns the InputConversion
object, which contains the format options for the inbound transformation.
Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).
" + }, + "outputConversion":{ + "shape":"OutputConversion", + "documentation":"Returns the OutputConversion
object, which contains the format options for the outbound transformation.
Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.
" } } }, @@ -991,6 +1177,10 @@ "transformerId" ], "members":{ + "capabilityDirection":{ + "shape":"CapabilityDirection", + "documentation":"Specifies whether this is capability is for inbound or outbound transformations.
" + }, "type":{ "shape":"EdiType", "documentation":"Returns the type of the capability. Currently, only edi
is supported.
A structure that contains the X12 transaction set and version.
", + "union":true + }, + "FromFormat":{ + "type":"string", + "enum":["X12"] + }, "GetCapabilityRequest":{ "type":"structure", "required":["capabilityId"], @@ -1147,6 +1350,7 @@ "shape":"PartnershipCapabilities", "documentation":"Returns one or more capabilities associated with this partnership.
" }, + "capabilityOptions":{"shape":"CapabilityOptions"}, "tradingPartnerId":{ "shape":"TradingPartnerId", "documentation":"Returns the unique identifier for the partner for this partnership.
" @@ -1275,10 +1479,7 @@ "transformerId", "transformerArn", "name", - "fileFormat", - "mappingTemplate", "status", - "ediType", "createdAt" ], "members":{ @@ -1294,36 +1495,91 @@ "shape":"TransformerName", "documentation":"Returns the name of the transformer, used to identify it.
" }, + "status":{ + "shape":"TransformerStatus", + "documentation":"Returns the state of the newly created transformer. The transformer can be either active
or inactive
. For the transformer to be used in a capability, its status must active
.
Returns a timestamp for creation date and time of the transformer.
" + }, + "modifiedAt":{ + "shape":"ModifiedDate", + "documentation":"Returns a timestamp for last time the transformer was modified.
" + }, "fileFormat":{ "shape":"FileFormat", - "documentation":"Returns that the currently supported file formats for EDI transformations are JSON
and XML
.
Returns that the currently supported file formats for EDI transformations are JSON
and XML
.
Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
" - }, - "status":{ - "shape":"TransformerStatus", - "documentation":"Returns the state of the newly created transformer. The transformer can be either active
or inactive
. For the transformer to be used in a capability, its status must active
.
Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "ediType":{ "shape":"EdiType", - "documentation":"Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
" + "documentation":"Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "sampleDocument":{ "shape":"FileLocation", - "documentation":"Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.
" + "documentation":"Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, - "createdAt":{ - "shape":"CreatedDate", - "documentation":"Returns a timestamp for creation date and time of the transformer.
" + "inputConversion":{ + "shape":"InputConversion", + "documentation":"Returns the InputConversion
object, which contains the format options for the inbound transformation.
Returns a timestamp for last time the transformer was modified.
" + "mapping":{ + "shape":"Mapping", + "documentation":"Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).
" + }, + "outputConversion":{ + "shape":"OutputConversion", + "documentation":"Returns the OutputConversion
object, which contains the format options for the outbound transformation.
Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.
" } } }, + "InputConversion":{ + "type":"structure", + "required":["fromFormat"], + "members":{ + "fromFormat":{ + "shape":"FromFormat", + "documentation":"The format for the transformer input: currently on X12
is supported.
A structure that contains the formatting options for an inbound transformer.
" + } + }, + "documentation":"Contains the input formatting options for an inbound transformer (takes an X12-formatted EDI document as input and converts it to JSON or XML.
" + }, + "InputFileSource":{ + "type":"structure", + "members":{ + "fileContent":{ + "shape":"InputFileSourceFileContentString", + "documentation":"Specify the input contents, as a string, for the source of an outbound transformation.
" + } + }, + "documentation":"The input file to use for an outbound transformation.
", + "union":true + }, + "InputFileSourceFileContentString":{ + "type":"string", + "max":5000000, + "min":1 + }, "InstructionsDocuments":{ "type":"list", "member":{"shape":"S3Location"}, @@ -1349,6 +1605,10 @@ "fault":true, "retryable":{"throttling":false} }, + "KeyList":{ + "type":"list", + "member":{"shape":"SampleDocumentKeys"} + }, "ListCapabilitiesRequest":{ "type":"structure", "members":{ @@ -1492,11 +1752,40 @@ "DISABLED" ] }, + "Mapping":{ + "type":"structure", + "required":["templateLanguage"], + "members":{ + "templateLanguage":{ + "shape":"MappingTemplateLanguage", + "documentation":"The transformation language for the template, either XSLT or JSONATA.
" + }, + "template":{ + "shape":"MappingTemplate", + "documentation":"A string that represents the mapping template, in the transformation language specified in templateLanguage
.
Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
" + }, "MappingTemplate":{ "type":"string", "max":350000, "min":0 }, + "MappingTemplateLanguage":{ + "type":"string", + "enum":[ + "XSLT", + "JSONATA" + ] + }, + "MappingType":{ + "type":"string", + "enum":[ + "JSONATA", + "XSLT" + ] + }, "MaxResults":{ "type":"integer", "box":true, @@ -1507,6 +1796,40 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "OutboundEdiOptions":{ + "type":"structure", + "members":{ + "x12":{ + "shape":"X12Envelope", + "documentation":"A structure that contains an X12 envelope structure.
" + } + }, + "documentation":"A container for outbound EDI options.
", + "union":true + }, + "OutputConversion":{ + "type":"structure", + "required":["toFormat"], + "members":{ + "toFormat":{ + "shape":"ToFormat", + "documentation":"The format for the output from an outbound transformer: only X12 is currently supported.
" + }, + "formatOptions":{ + "shape":"FormatOptions", + "documentation":"A structure that contains the X12 transaction set and version for the transformer output.
" + } + }, + "documentation":"Contains the formatting options for an outbound transformer (takes JSON or XML as input and converts it to an EDI document (currently only X12 format is supported).
" + }, + "OutputSampleFileSource":{ + "type":"structure", + "members":{ + "fileLocation":{"shape":"S3Location"} + }, + "documentation":"Container for the location of a sample file used for outbound transformations.
", + "union":true + }, "PageToken":{ "type":"string", "max":2048, @@ -1555,6 +1878,7 @@ "shape":"PartnershipCapabilities", "documentation":"Returns one or more capabilities associated with this partnership.
" }, + "capabilityOptions":{"shape":"CapabilityOptions"}, "tradingPartnerId":{ "shape":"TradingPartnerId", "documentation":"Returns the unique, system-generated identifier for a trading partner.
" @@ -1663,12 +1987,44 @@ "documentation":"Specifies the Amazon S3 key for the file location.
" } }, - "documentation":"Specifies the details for the Amazon S3 file location that is being used with Amazon Web Services B2BI Data Interchange. File locations in Amazon S3 are identified using a combination of the bucket and key.
" + "documentation":"Specifies the details for the Amazon S3 file location that is being used with Amazon Web Services B2B Data Interchange. File locations in Amazon S3 are identified using a combination of the bucket and key.
" }, "S3LocationList":{ "type":"list", "member":{"shape":"S3Location"} }, + "SampleDocumentKeys":{ + "type":"structure", + "members":{ + "input":{ + "shape":"S3Key", + "documentation":"An array of keys for your input sample documents.
" + }, + "output":{ + "shape":"S3Key", + "documentation":"An array of keys for your output sample documents.
" + } + }, + "documentation":"An array of the Amazon S3 keys used to identify the location for your sample documents.
" + }, + "SampleDocuments":{ + "type":"structure", + "required":[ + "bucketName", + "keys" + ], + "members":{ + "bucketName":{ + "shape":"BucketName", + "documentation":"Contains the Amazon S3 bucket that is used to hold your sample documents.
" + }, + "keys":{ + "shape":"KeyList", + "documentation":"Contains an array of the Amazon S3 keys used to identify the location for your sample documents.
" + } + }, + "documentation":"Describes a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.
" + }, "ServiceQuotaExceededException":{ "type":"structure", "required":[ @@ -1795,6 +2151,45 @@ "max":256, "min":0 }, + "TemplateDetails":{ + "type":"structure", + "members":{ + "x12":{"shape":"X12Details"} + }, + "documentation":"A data structure that contains the information to use when generating a mapping template.
", + "union":true + }, + "TestConversionRequest":{ + "type":"structure", + "required":[ + "source", + "target" + ], + "members":{ + "source":{ + "shape":"ConversionSource", + "documentation":"Specify the source file for an outbound EDI request.
" + }, + "target":{ + "shape":"ConversionTarget", + "documentation":"Specify the format (X12 is the only currently supported format), and other details for the conversion target.
" + } + } + }, + "TestConversionResponse":{ + "type":"structure", + "required":["convertedFileContent"], + "members":{ + "convertedFileContent":{ + "shape":"String", + "documentation":"Returns the converted file content.
" + }, + "validationMessages":{ + "shape":"ValidationMessages", + "documentation":"Returns an array of strings, each containing a message that Amazon Web Services B2B Data Interchange generates during the conversion.
" + } + } + }, "TestMappingInputFileContent":{ "type":"string", "max":5000000, @@ -1814,7 +2209,7 @@ }, "mappingTemplate":{ "shape":"MappingTemplate", - "documentation":"Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
" + "documentation":"Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
This parameter is available for backwards compatibility. Use the Mapping data type instead.
Returns the descriptive name for the transformer.
" }, + "status":{ + "shape":"TransformerStatus", + "documentation":"Returns the state of the newly created transformer. The transformer can be either active
or inactive
. For the transformer to be used in a capability, its status must active
.
Returns a timestamp indicating when the transformer was created. For example, 2023-07-20T19:58:44.624Z
.
Returns a timestamp representing the date and time for the most recent change for the transformer object.
" + }, "fileFormat":{ "shape":"FileFormat", - "documentation":"Returns that the currently supported file formats for EDI transformations are JSON
and XML
.
Returns that the currently supported file formats for EDI transformations are JSON
and XML
.
Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
" - }, - "status":{ - "shape":"TransformerStatus", - "documentation":"Returns the state of the newly created transformer. The transformer can be either active
or inactive
. For the transformer to be used in a capability, its status must active
.
Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "ediType":{ "shape":"EdiType", - "documentation":"Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
" + "documentation":"Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "sampleDocument":{ "shape":"FileLocation", - "documentation":"Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.
" + "documentation":"Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, - "createdAt":{ - "shape":"CreatedDate", - "documentation":"Returns a timestamp indicating when the transformer was created. For example, 2023-07-20T19:58:44.624Z
.
Returns a structure that contains the format options for the transformation.
" }, - "modifiedAt":{ - "shape":"ModifiedDate", - "documentation":"Returns a timestamp representing the date and time for the most recent change for the transformer object.
" + "mapping":{ + "shape":"Mapping", + "documentation":"Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).
" + }, + "outputConversion":{ + "shape":"OutputConversion", + "documentation":"Returns the OutputConversion
object, which contains the format options for the outbound transformation.
Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.
" } }, - "documentation":"Contains the details for a transformer object. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file.
" + "documentation":"Contains the details for a transformer object. A transformer can take an EDI file as input and transform it into a JSON- or XML-formatted document. Alternatively, a transformer can take a JSON- or XML-formatted document as input and transform it into an EDI file.
" }, "UntagResourceRequest":{ "type":"structure", @@ -2070,6 +2491,10 @@ "capabilities":{ "shape":"PartnershipCapabilities", "documentation":"List of the capabilities associated with this partnership.
" + }, + "capabilityOptions":{ + "shape":"CapabilityOptions", + "documentation":"To update, specify the structure that contains the details for the associated capabilities.
" } } }, @@ -2110,6 +2535,10 @@ "shape":"PartnershipCapabilities", "documentation":"Returns one or more capabilities associated with this partnership.
" }, + "capabilityOptions":{ + "shape":"CapabilityOptions", + "documentation":"Returns the structure that contains the details for the associated capabilities.
" + }, "tradingPartnerId":{ "shape":"TradingPartnerId", "documentation":"Returns the unique, system-generated identifier for a trading partner.
" @@ -2215,25 +2644,49 @@ "shape":"TransformerName", "documentation":"Specify a new name for the transformer, if you want to update it.
" }, + "status":{ + "shape":"TransformerStatus", + "documentation":"Specifies the transformer's status. You can update the state of the transformer, from active
to inactive
, or inactive
to active
.
Specifies that the currently supported file formats for EDI transformations are JSON
and XML
.
Specifies that the currently supported file formats for EDI transformations are JSON
and XML
.
Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
" - }, - "status":{ - "shape":"TransformerStatus", - "documentation":"Specifies the transformer's status. You can update the state of the transformer, from active
to inactive
, or inactive
to active
.
Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
This parameter is available for backwards compatibility. Use the Mapping data type instead.
Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
" + "documentation":"Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "sampleDocument":{ "shape":"FileLocation", - "documentation":"Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data.
" + "documentation":"Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." + }, + "inputConversion":{ + "shape":"InputConversion", + "documentation":"To update, specify the InputConversion
object, which contains the format options for the inbound transformation.
Specify the structure that contains the mapping template and its language (either XSLT or JSONATA).
" + }, + "outputConversion":{ + "shape":"OutputConversion", + "documentation":"To update, specify the OutputConversion
object, which contains the format options for the outbound transformation.
Specify a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.
" } } }, @@ -2243,10 +2696,7 @@ "transformerId", "transformerArn", "name", - "fileFormat", - "mappingTemplate", "status", - "ediType", "createdAt", "modifiedAt" ], @@ -2263,33 +2713,57 @@ "shape":"TransformerName", "documentation":"Returns the name of the transformer.
" }, + "status":{ + "shape":"TransformerStatus", + "documentation":"Returns the state of the newly created transformer. The transformer can be either active
or inactive
. For the transformer to be used in a capability, its status must active
.
Returns a timestamp for creation date and time of the transformer.
" + }, + "modifiedAt":{ + "shape":"ModifiedDate", + "documentation":"Returns a timestamp for last time the transformer was modified.
" + }, "fileFormat":{ "shape":"FileFormat", - "documentation":"Returns that the currently supported file formats for EDI transformations are JSON
and XML
.
Returns that the currently supported file formats for EDI transformations are JSON
and XML
.
Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
" - }, - "status":{ - "shape":"TransformerStatus", - "documentation":"Returns the state of the newly created transformer. The transformer can be either active
or inactive
. For the transformer to be used in a capability, its status must active
.
Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "ediType":{ "shape":"EdiType", - "documentation":"Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
" + "documentation":"Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, "sampleDocument":{ "shape":"FileLocation", - "documentation":"Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.
" + "documentation":"Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.
", + "deprecated":true, + "deprecatedMessage":"This is a legacy trait. Please use input-conversion or output-conversion." }, - "createdAt":{ - "shape":"CreatedDate", - "documentation":"Returns a timestamp for creation date and time of the transformer.
" + "inputConversion":{ + "shape":"InputConversion", + "documentation":"Returns the InputConversion
object, which contains the format options for the inbound transformation.
Returns a timestamp for last time the transformer was modified.
" + "mapping":{ + "shape":"Mapping", + "documentation":"Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).
" + }, + "outputConversion":{ + "shape":"OutputConversion", + "documentation":"Returns the OutputConversion
object, which contains the format options for the outbound transformation.
Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.
" } } }, @@ -2302,6 +2776,58 @@ "documentation":"Occurs when a B2BI object cannot be validated against a request from another object.
", "exception":true }, + "ValidationMessages":{ + "type":"list", + "member":{"shape":"String"} + }, + "X12AcknowledgmentRequestedCode":{ + "type":"string", + "max":1, + "min":1, + "pattern":"[a-zA-Z0-9]*" + }, + "X12ApplicationReceiverCode":{ + "type":"string", + "max":15, + "min":2, + "pattern":"[a-zA-Z0-9]*" + }, + "X12ApplicationSenderCode":{ + "type":"string", + "max":15, + "min":2, + "pattern":"[a-zA-Z0-9]*" + }, + "X12ComponentSeparator":{ + "type":"string", + "max":1, + "min":1, + "pattern":"[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]" + }, + "X12DataElementSeparator":{ + "type":"string", + "max":1, + "min":1, + "pattern":"[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]" + }, + "X12Delimiters":{ + "type":"structure", + "members":{ + "componentSeparator":{ + "shape":"X12ComponentSeparator", + "documentation":"The component, or sub-element, separator. The default value is :
(colon).
The data element separator. The default value is *
(asterisk).
The segment terminator. The default value is ~
(tilde).
In X12 EDI messages, delimiters are used to mark the end of segments or elements, and are defined in the interchange control header. The delimiters are part of the message's syntax and divide up its different elements.
" + }, "X12Details":{ "type":"structure", "members":{ @@ -2311,11 +2837,130 @@ }, "version":{ "shape":"X12Version", - "documentation":"Returns the version to use for the specified X12 transaction set.
</p>
"
+ "documentation":"Returns the version to use for the specified X12 transaction set.
" } }, "documentation":"A structure that contains the X12 transaction set and version. The X12 structure is used when the system transforms an EDI (electronic data interchange) file.
If an EDI input file contains more than one transaction, each transaction must have the same transaction set and version, for example 214/4010. If not, the transformer cannot parse the file.
A container for the X12 outbound EDI headers.
" + } + }, + "documentation":"A wrapper structure for an X12 definition object.
The X12 envelope ensures the integrity of the data and the efficiency of the information exchange. The X12 message structure has hierarchical levels. From highest to the lowest, they are:
Interchange Envelope
Functional Group
Transaction Set
A value representing the code used to identify the party transmitting a message, at position GS-02.
" + }, + "applicationReceiverCode":{ + "shape":"X12ApplicationReceiverCode", + "documentation":"A value representing the code used to identify the party receiving a message, at position GS-03.
" + }, + "responsibleAgencyCode":{ + "shape":"X12ResponsibleAgencyCode", + "documentation":"A code that identifies the issuer of the standard, at position GS-07.
" + } + }, + "documentation":"Part of the X12 message structure. These are the functional group headers for the X12 EDI object.
" + }, + "X12IdQualifier":{ + "type":"string", + "max":2, + "min":2, + "pattern":"[a-zA-Z0-9]*" + }, + "X12InterchangeControlHeaders":{ + "type":"structure", + "members":{ + "senderIdQualifier":{ + "shape":"X12IdQualifier", + "documentation":"Located at position ISA-05 in the header. Qualifier for the sender ID. Together, the ID and qualifier uniquely identify the sending trading partner.
" + }, + "senderId":{ + "shape":"X12SenderId", + "documentation":"Located at position ISA-06 in the header. This value (along with the senderIdQualifier
) identifies the sender of the interchange.
Located at position ISA-07 in the header. Qualifier for the receiver ID. Together, the ID and qualifier uniquely identify the receiving trading partner.
" + }, + "receiverId":{ + "shape":"X12ReceiverId", + "documentation":"Located at position ISA-08 in the header. This value (along with the receiverIdQualifier
) identifies the intended recipient of the interchange.
Located at position ISA-11 in the header. This string makes it easier when you need to group similar adjacent element values together without using extra segments.
This parameter is only honored for version greater than 401 (VERSION_4010
and higher).
For versions less than 401, this field is called StandardsId, in which case our service sets the value to U
.
Located at position ISA-14 in the header. The value \"1\" indicates that the sender is requesting an interchange acknowledgment at receipt of the interchange. The value \"0\" is used otherwise.
" + }, + "usageIndicatorCode":{ + "shape":"X12UsageIndicatorCode", + "documentation":"Located at position ISA-15 in the header. Specifies how this interchange is being used:
T
indicates this interchange is for testing.
P
indicates this interchange is for production.
I
indicates this interchange is informational.
In X12, the Interchange Control Header is the first segment of an EDI document and is part of the Interchange Envelope. It contains information about the sender and receiver, the date and time of transmission, and the X12 version being used. It also includes delivery information, such as the sender and receiver IDs.
" + }, + "X12OutboundEdiHeaders":{ + "type":"structure", + "members":{ + "interchangeControlHeaders":{ + "shape":"X12InterchangeControlHeaders", + "documentation":"In X12 EDI messages, delimiters are used to mark the end of segments or elements, and are defined in the interchange control header.
" + }, + "functionalGroupHeaders":{ + "shape":"X12FunctionalGroupHeaders", + "documentation":"The functional group headers for the X12 object.
" + }, + "delimiters":{ + "shape":"X12Delimiters", + "documentation":"The delimiters, for example semicolon (;
), that separates sections of the headers for the X12 object.
Specifies whether or not to validate the EDI for this X12 object: TRUE
or FALSE
.
A structure containing the details for an outbound EDI object.
" + }, + "X12ReceiverId":{ + "type":"string", + "max":15, + "min":15, + "pattern":"[a-zA-Z0-9]*" + }, + "X12RepetitionSeparator":{ + "type":"string", + "max":1, + "min":1 + }, + "X12ResponsibleAgencyCode":{ + "type":"string", + "max":2, + "min":1, + "pattern":"[a-zA-Z0-9]*" + }, + "X12SegmentTerminator":{ + "type":"string", + "max":1, + "min":1, + "pattern":"[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]" + }, + "X12SenderId":{ + "type":"string", + "max":15, + "min":15, + "pattern":"[a-zA-Z0-9]*" + }, "X12TransactionSet":{ "type":"string", "enum":[ @@ -2396,6 +3041,16 @@ "X12_999_X231" ] }, + "X12UsageIndicatorCode":{ + "type":"string", + "max":1, + "min":1, + "pattern":"[a-zA-Z0-9]*" + }, + "X12ValidateEdi":{ + "type":"boolean", + "box":true + }, "X12Version":{ "type":"string", "enum":[ diff --git a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json index cb8f10ca79..c639c4d7af 100644 --- a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json +++ b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json @@ -679,18 +679,18 @@ "members":{ "generationConfiguration":{ "shape":"ExternalSourcesGenerationConfiguration", - "documentation":"The prompt used with the external source wrapper object with the retrieveAndGenerate function.
" + "documentation":"The prompt used with the external source wrapper object with the retrieveAndGenerate
function.
The modelArn used with the external source wrapper object in the retrieveAndGenerate function.
" + "documentation":"The model Amazon Resource Name (ARN) for the external source wrapper object in the retrieveAndGenerate
function.
The document used with the external source wrapper object in the retrieveAndGenerate function.
" + "documentation":"The document for the external source wrapper object in the retrieveAndGenerate
function.
The configurations of the external source wrapper object in the retrieveAndGenerate function.
" + "documentation":"The configurations of the external source wrapper object in the retrieveAndGenerate
function.
Details about the response from the Lambda parsing of the output of the post-processing step.
" }, + "rawResponse":{"shape":"RawResponse"}, "traceId":{ "shape":"TraceId", "documentation":"The unique identifier of the trace.
" @@ -2322,10 +2324,12 @@ "PreProcessingModelInvocationOutput":{ "type":"structure", "members":{ + "metadata":{"shape":"Metadata"}, "parsedResponse":{ "shape":"PreProcessingParsedResponse", "documentation":"Details about the response from the Lambda parsing of the output of the pre-processing step.
" }, + "rawResponse":{"shape":"RawResponse"}, "traceId":{ "shape":"TraceId", "documentation":"The unique identifier of the trace.
" @@ -2760,15 +2764,15 @@ "members":{ "externalSourcesConfiguration":{ "shape":"ExternalSourcesRetrieveAndGenerateConfiguration", - "documentation":"The configuration used with the external source wrapper object in the retrieveAndGenerate function.
" + "documentation":"The configuration for the external source wrapper object in the retrieveAndGenerate
function.
Contains details about the resource being queried.
" + "documentation":"Contains details about the knowledge base for retrieving information and generating responses.
" }, "type":{ "shape":"RetrieveAndGenerateType", - "documentation":"The type of resource that is queried by the request.
" + "documentation":"The type of resource that contains your data for retrieving information and generating responses.
If you choose to use EXTERNAL_SOURCES
, then currently only Claude 3 Sonnet models for knowledge bases are supported.
Contains details about the resource being queried.
This data type is used in the following API operations:
RetrieveAndGenerate request – in the retrieveAndGenerateConfiguration
field
The assessment details in the response from the guardrail.
" + }, + "guardrailCoverage":{ + "shape":"GuardrailCoverage", + "documentation":"The guardrail coverage details in the apply guardrail response.
" } } }, @@ -709,6 +713,10 @@ "contextualGroundingPolicy":{ "shape":"GuardrailContextualGroundingPolicyAssessment", "documentation":"The contextual grounding policy used for the guardrail assessment.
" + }, + "invocationMetrics":{ + "shape":"GuardrailInvocationMetrics", + "documentation":"The invocation metrics for the guardrail assessment.
" } }, "documentation":"A behavior assessment of the guardrail policies used in a call to the Converse API.
" @@ -780,6 +788,10 @@ "shape":"GuardrailContentFilterConfidence", "documentation":"The guardrail confidence.
" }, + "filterStrength":{ + "shape":"GuardrailContentFilterStrength", + "documentation":"The filter strength setting for the guardrail content filter.
" + }, "action":{ "shape":"GuardrailContentPolicyAction", "documentation":"The guardrail action.
" @@ -800,6 +812,15 @@ "type":"list", "member":{"shape":"GuardrailContentFilter"} }, + "GuardrailContentFilterStrength":{ + "type":"string", + "enum":[ + "NONE", + "LOW", + "MEDIUM", + "HIGH" + ] + }, "GuardrailContentFilterType":{ "type":"string", "enum":[ @@ -959,6 +980,16 @@ }, "documentation":"A text block that contains text that you want to assess with a guardrail. For more information, see GuardrailConverseContentBlock.
" }, + "GuardrailCoverage":{ + "type":"structure", + "members":{ + "textCharacters":{ + "shape":"GuardrailTextCharactersCoverage", + "documentation":"The text characters of the guardrail coverage details.
" + } + }, + "documentation":"The action of the guardrail coverage details.
" + }, "GuardrailCustomWord":{ "type":"structure", "required":[ @@ -987,6 +1018,24 @@ "min":0, "pattern":"(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))" }, + "GuardrailInvocationMetrics":{ + "type":"structure", + "members":{ + "guardrailProcessingLatency":{ + "shape":"GuardrailProcessingLatency", + "documentation":"The processing latency details for the guardrail invocation metrics.
" + }, + "usage":{ + "shape":"GuardrailUsage", + "documentation":"The usage details for the guardrail invocation metrics.
" + }, + "guardrailCoverage":{ + "shape":"GuardrailCoverage", + "documentation":"The coverage details for the guardrail invocation metrics.
" + } + }, + "documentation":"The invocation metrics for the guardrail.
" + }, "GuardrailManagedWord":{ "type":"structure", "required":[ @@ -1096,6 +1145,10 @@ "VEHICLE_IDENTIFICATION_NUMBER" ] }, + "GuardrailProcessingLatency":{ + "type":"long", + "box":true + }, "GuardrailRegexFilter":{ "type":"structure", "required":["action"], @@ -1204,6 +1257,20 @@ }, "documentation":"The text block to be evaluated by the guardrail.
" }, + "GuardrailTextCharactersCoverage":{ + "type":"structure", + "members":{ + "guarded":{ + "shape":"TextCharactersGuarded", + "documentation":"The text characters that were guarded by the guardrail coverage.
" + }, + "total":{ + "shape":"TextCharactersTotal", + "documentation":"The total text characters by the guardrail coverage.
" + } + }, + "documentation":"The guardrail coverage for the text characters.
" + }, "GuardrailTopic":{ "type":"structure", "required":[ @@ -1859,6 +1926,14 @@ "type":"list", "member":{"shape":"SystemContentBlock"} }, + "TextCharactersGuarded":{ + "type":"integer", + "box":true + }, + "TextCharactersTotal":{ + "type":"integer", + "box":true + }, "ThrottlingException":{ "type":"structure", "members":{ diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index d80b5640cc..0a8fd94de3 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -2711,6 +2711,7 @@ "bedrock" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2720,6 +2721,12 @@ }, "hostname" : "bedrock.ap-northeast-1.amazonaws.com" }, + "bedrock-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "bedrock.ap-northeast-2.amazonaws.com" + }, "bedrock-ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" @@ -2780,6 +2787,12 @@ }, "hostname" : "bedrock-fips.us-east-1.amazonaws.com" }, + "bedrock-fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "bedrock-fips.us-east-2.amazonaws.com" + }, "bedrock-fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2792,6 +2805,12 @@ }, "hostname" : "bedrock-runtime.ap-northeast-1.amazonaws.com" }, + "bedrock-runtime-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "bedrock-runtime.ap-northeast-2.amazonaws.com" + }, "bedrock-runtime-ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" @@ -2852,6 +2871,12 @@ }, "hostname" : "bedrock-runtime-fips.us-east-1.amazonaws.com" }, + "bedrock-runtime-fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "bedrock-runtime-fips.us-east-2.amazonaws.com" + }, "bedrock-runtime-fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2870,6 +2895,12 @@ }, "hostname" : 
"bedrock-runtime.us-east-1.amazonaws.com" }, + "bedrock-runtime-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "bedrock-runtime.us-east-2.amazonaws.com" + }, "bedrock-runtime-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2888,6 +2919,12 @@ }, "hostname" : "bedrock.us-east-1.amazonaws.com" }, + "bedrock-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "bedrock.us-east-2.amazonaws.com" + }, "bedrock-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2901,6 +2938,7 @@ "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } } }, @@ -8432,28 +8470,133 @@ }, "firehose" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "firehose.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "firehose.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "firehose.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "firehose.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "firehose.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "firehose.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "firehose.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "firehose.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + 
} ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "firehose.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "firehose.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "firehose.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ap-southeast-5" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "firehose.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "firehose.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "firehose.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "firehose.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "firehose.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "firehose.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "firehose.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "firehose.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "firehose.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "firehose.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -8482,32 +8625,76 @@ "deprecated" : true, "hostname" : 
"firehose-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "firehose.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "firehose.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "firehose.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "firehose.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "firehose-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "firehose-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "firehose.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "firehose-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "firehose-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "firehose.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "firehose-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "firehose-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "firehose.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "firehose-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "firehose-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "firehose.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } diff --git a/botocore/data/iotdeviceadvisor/2020-09-18/endpoint-rule-set-1.json b/botocore/data/iotdeviceadvisor/2020-09-18/endpoint-rule-set-1.json index d65669e73e..7d33bc6ca6 100644 --- 
a/botocore/data/iotdeviceadvisor/2020-09-18/endpoint-rule-set-1.json +++ b/botocore/data/iotdeviceadvisor/2020-09-18/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/iotdeviceadvisor/2020-09-18/service-2.json 
b/botocore/data/iotdeviceadvisor/2020-09-18/service-2.json index cf68b11e45..d630648a4d 100644 --- a/botocore/data/iotdeviceadvisor/2020-09-18/service-2.json +++ b/botocore/data/iotdeviceadvisor/2020-09-18/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"api.iotdeviceadvisor", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"AWSIoTDeviceAdvisor", "serviceFullName":"AWS IoT Core Device Advisor", "serviceId":"IotDeviceAdvisor", "signatureVersion":"v4", "signingName":"iotdeviceadvisor", - "uid":"iotdeviceadvisor-2020-09-18" + "uid":"iotdeviceadvisor-2020-09-18", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateSuiteDefinition":{ @@ -232,6 +234,12 @@ "SignatureVersion4" ] }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\u0021-\\u007E]+$" + }, "ConflictException":{ "type":"structure", "members":{ @@ -255,6 +263,11 @@ "tags":{ "shape":"TagMap", "documentation":"The tags to be attached to the suite definition.
" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"The client token for the test suite definition creation. This token is used for tracking test suite definition creation using retries and obtaining its status. This parameter is optional.
", + "idempotencyToken":true } } }, diff --git a/botocore/data/ivs-realtime/2020-07-14/service-2.json b/botocore/data/ivs-realtime/2020-07-14/service-2.json index 6d76fe7d40..8feed73080 100644 --- a/botocore/data/ivs-realtime/2020-07-14/service-2.json +++ b/botocore/data/ivs-realtime/2020-07-14/service-2.json @@ -1321,7 +1321,7 @@ }, "errorCode":{ "shape":"EventErrorCode", - "documentation":"If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. INSUFFICIENT_CAPABILITIES
indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities
field in CreateParticipantToken. QUOTA_EXCEEDED
indicates that the number of participants who want to publish/subscribe to a stage exceeds the quota; for more information, see Service Quotas. PUBLISHER_NOT_FOUND
indicates that the participant tried to subscribe to a publisher that doesn’t exist.
If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null.
B_FRAME_PRESENT
— The participant's stream includes B-frames. For details, see IVS RTMP Publishing.
BITRATE_EXCEEDED
— The participant exceeded the maximum supported bitrate. For details, see Service Quotas.
INSUFFICIENT_CAPABILITIES
— The participant tried to take an action that the participant’s token is not allowed to do. For details on participant capabilities, see the capabilities
field in CreateParticipantToken.
INTERNAL_SERVER_EXCEPTION
— The participant failed to publish to the stage due to an internal server error.
INVALID_AUDIO_CODEC
— The participant is using an invalid audio codec. For details, see Stream Ingest.
INVALID_INPUT
— The participant is using an invalid input stream.
INVALID_PROTOCOL
— The participant's IngestConfiguration resource is configured for RTMPS but they tried streaming with RTMP. For details, see IVS RTMP Publishing.
INVALID_STREAM_KEY
— The participant is using an invalid stream key. For details, see IVS RTMP Publishing.
INVALID_VIDEO_CODEC
— The participant is using an invalid video codec. For details, see Stream Ingest.
PUBLISHER_NOT_FOUND
— The participant tried to subscribe to a publisher that doesn’t exist.
QUOTA_EXCEEDED
— The number of participants who want to publish/subscribe to a stage exceeds the quota. For details, see Service Quotas.
RESOLUTION_EXCEEDED
— The participant exceeded the maximum supported resolution. For details, see Service Quotas.
REUSE_OF_STREAM_KEY
— The participant tried to use a stream key that is associated with another active stage session.
STREAM_DURATION_EXCEEDED
— The participant exceeded the maximum allowed stream duration. For details, see Service Quotas.
An occurrence during a stage session.
" @@ -1339,7 +1339,10 @@ "INVALID_VIDEO_CODEC", "INVALID_PROTOCOL", "INVALID_STREAM_KEY", - "REUSE_OF_STREAM_KEY" + "REUSE_OF_STREAM_KEY", + "B_FRAME_PRESENT", + "INVALID_INPUT", + "INTERNAL_SERVER_EXCEPTION" ] }, "EventList":{ @@ -1571,7 +1574,7 @@ "type":"integer", "box":true, "max":1920, - "min":1 + "min":2 }, "ImportPublicKeyRequest":{ "type":"structure", @@ -3112,11 +3115,11 @@ "members":{ "width":{ "shape":"Width", - "documentation":"Video-resolution width. Note that the maximum value is determined by width
times height
, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280.
Video-resolution width. This must be an even number. Note that the maximum value is determined by width
times height
, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280.
Video-resolution height. Note that the maximum value is determined by width
times height
, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 720.
Video-resolution height. This must be an even number. Note that the maximum value is determined by width
times height
, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 720.
The HEAD
operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. It's not possible to retrieve the exact exception of these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
General purpose bucket permissions - To use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If you enable x-amz-checksum-mode
in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a HEAD
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
Directory buckets - Delete marker is not supported by directory buckets.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The following actions are related to HeadObject
:
The HEAD
operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic code, such as 400 Bad Request
, 403 Forbidden
, 404 Not Found
, 405 Method Not Allowed
, 412 Precondition Failed
, or 304 Not Modified
. It's not possible to retrieve the exact exception of these error codes.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
General purpose bucket permissions - To use HEAD
, you must have the s3:GetObject
permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.
If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found
error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 Forbidden
error.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
If you enable x-amz-checksum-mode
in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey
and kms:Decrypt
permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for HEAD
requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
header is used when you PUT
an object to S3 and want to specify the encryption method. If you include this header in a HEAD
request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request
error. This is because the encryption method can't be changed when you retrieve the object.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
If the specified version is a delete marker, the response returns a 405 Method Not Allowed
error and the Last-Modified: timestamp
response header.
Directory buckets - Delete marker is not supported by directory buckets.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The following actions are related to HeadObject
:
This operation is not supported by directory buckets.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the following:
A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.
A status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to PutBucketLifecycleConfiguration
:
This operation is not supported by directory buckets.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.
Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.
A lifecycle rule consists of the following:
A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.
A status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to PutBucketLifecycleConfiguration
:
The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumSHA1":{ "shape":"ChecksumSHA1", @@ -2037,11 +2038,11 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumSHA1":{ "shape":"ChecksumSHA1", @@ -2118,13 +2119,13 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32c" }, @@ -2198,11 +2199,11 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumSHA1":{ "shape":"ChecksumSHA1", @@ -2604,11 +2605,11 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumSHA1":{ "shape":"ChecksumSHA1", @@ -2634,11 +2635,11 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumSHA1":{ "shape":"ChecksumSHA1", @@ -3669,7 +3670,7 @@ }, "ChecksumAlgorithm":{ "shape":"ChecksumAlgorithm", - "documentation":"Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
parameter.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
parameter.
Container for a lifecycle rule.
", "locationName":"Rule" + }, + "TransitionDefaultMinimumObjectSize":{ + "shape":"TransitionDefaultMinimumObjectSize", + "documentation":"Indicates which default minimum object size behavior is applied to the lifecycle configuration.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32c" }, @@ -5630,13 +5637,13 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32c" }, @@ -7913,11 +7920,11 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumSHA1":{ "shape":"ChecksumSHA1", @@ -8116,11 +8123,11 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
" }, "ChecksumSHA1":{ "shape":"ChecksumSHA1", @@ -8556,6 +8563,17 @@ }, "payload":"InventoryConfiguration" }, + "PutBucketLifecycleConfigurationOutput":{ + "type":"structure", + "members":{ + "TransitionDefaultMinimumObjectSize":{ + "shape":"TransitionDefaultMinimumObjectSize", + "documentation":"Indicates which default minimum object size behavior is applied to the lifecycle configuration.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden
(access denied).
Indicates which default minimum object size behavior is applied to the lifecycle configuration.
all_storage_classes_128K
- Objects smaller than 128 KB will not transition to any storage class by default.
varies_by_storage_class
- Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.
To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan
or ObjectSizeLessThan
in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32c" }, @@ -9463,19 +9487,19 @@ }, "ChecksumAlgorithm":{ "shape":"ChecksumAlgorithm", - "documentation":"Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm
or x-amz-trailer
header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request
.
For the x-amz-checksum-algorithm
header, replace algorithm
with the supported algorithm from the following list:
CRC32
CRC32C
SHA1
SHA256
For more information, see Checking object integrity in the Amazon S3 User Guide.
If the individual checksum value you provide through x-amz-checksum-algorithm
doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm
, Amazon S3 ignores any provided ChecksumAlgorithm
parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm
.
For directory buckets, when you use Amazon Web Services SDKs, CRC32
is the default checksum algorithm that's used for performance.
This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32c" }, @@ -10990,6 +11014,13 @@ }, "documentation":"Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon S3 User Guide.
" }, + "TransitionDefaultMinimumObjectSize":{ + "type":"string", + "enum":[ + "varies_by_storage_class", + "all_storage_classes_128K" + ] + }, "TransitionList":{ "type":"list", "member":{"shape":"Transition"}, @@ -11210,13 +11241,13 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32c" }, @@ -11304,13 +11335,13 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", "location":"header", "locationName":"x-amz-checksum-crc32c" }, @@ -11521,13 +11552,13 @@ }, "ChecksumCRC32":{ "shape":"ChecksumCRC32", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", "location":"header", "locationName":"x-amz-fwd-header-x-amz-checksum-crc32" }, "ChecksumCRC32C":{ "shape":"ChecksumCRC32C", - "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", + "documentation":"This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
", "location":"header", "locationName":"x-amz-fwd-header-x-amz-checksum-crc32c" }, diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index e63889326a..6586ab6ddc 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -7690,6 +7690,10 @@ "AppLifecycleManagement":{ "shape":"AppLifecycleManagement", "documentation":"Settings that are used to configure and manage the lifecycle of CodeEditor applications.
" + }, + "BuiltInLifecycleConfigArn":{ + "shape":"StudioLifecycleConfigArn", + "documentation":"The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default lifecycle configuration.
" } }, "documentation":"The Code Editor application settings.
For more information about Code Editor, see Get started with Code Editor in Amazon SageMaker.
" @@ -12926,6 +12930,10 @@ "ResourceSpec":{ "shape":"ResourceSpec", "documentation":"The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.
" + }, + "BuiltInLifecycleConfigArn":{ + "shape":"StudioLifecycleConfigArn", + "documentation":"The lifecycle configuration that runs before the default lifecycle configuration
" } } }, @@ -22380,6 +22388,10 @@ "EmrSettings":{ "shape":"EmrSettings", "documentation":"The configuration parameters that specify the IAM roles assumed by the execution role of SageMaker (assumable roles) and the cluster instances or job execution environments (execution roles or runtime roles) to manage and access resources required for running Amazon EMR clusters or Amazon EMR Serverless applications.
" + }, + "BuiltInLifecycleConfigArn":{ + "shape":"StudioLifecycleConfigArn", + "documentation":"The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default lifecycle configuration.
" } }, "documentation":"The settings for the JupyterLab application.
" diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index 6a7ea5c802..da53c340cd 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -293,7 +293,7 @@ {"shape":"ResourceLimitExceededException"}, {"shape":"InvalidParameterValuesException"} ], - "documentation":"Creates one or more WorkSpaces.
This operation is asynchronous and returns before the WorkSpaces are created.
The MANUAL
running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.
You don't need to specify the PCOIP
protocol for Linux bundles because WSP
is the default protocol for those bundles.
User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core.
Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?
Creates one or more WorkSpaces.
This operation is asynchronous and returns before the WorkSpaces are created.
The MANUAL
running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.
You don't need to specify the PCOIP
protocol for Linux bundles because DCV
(formerly WSP) is the default protocol for those bundles.
User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core.
Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?
The ingestion process to be used when importing the image, depending on which protocol you want to use for your BYOL Workspace image, either PCoIP, WorkSpaces Streaming Protocol (WSP), or bring your own protocol (BYOP). To use WSP, specify a value that ends in _WSP
. To use PCoIP, specify a value that does not end in _WSP
. To use BYOP, specify a value that ends in _BYOP
.
For non-GPU-enabled bundles (bundles other than Graphics or GraphicsPro), specify BYOL_REGULAR
, BYOL_REGULAR_WSP
, or BYOL_REGULAR_BYOP
, depending on the protocol.
The BYOL_REGULAR_BYOP
and BYOL_GRAPHICS_G4DN_BYOP
values are only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use these values. For more information, see Amazon WorkSpaces Core.
The ingestion process to be used when importing the image, depending on which protocol you want to use for your BYOL Workspace image, either PCoIP, DCV, or bring your own protocol (BYOP). To use DCV, specify a value that ends in _DCV
. To use PCoIP, specify a value that does not end in _DCV
. To use BYOP, specify a value that ends in _BYOP
.
For non-GPU-enabled bundles (bundles other than Graphics or GraphicsPro), specify BYOL_REGULAR
, BYOL_REGULAR_DCV
, or BYOL_REGULAR_BYOP
, depending on the protocol.
The BYOL_REGULAR_BYOP
and BYOL_GRAPHICS_G4DN_BYOP
values are only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use these values. For more information, see Amazon WorkSpaces Core.
If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses.
Although this parameter is an array, only one item is allowed at this time.
During the image import process, non-GPU WSP WorkSpaces with Windows 11 support only Microsoft_Office_2019
. GPU WSP WorkSpaces with Windows 11 do not support Office installation.
If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses.
Although this parameter is an array, only one item is allowed at this time.
During the image import process, non-GPU DCV (formerly WSP) WorkSpaces with Windows 11 support only Microsoft_Office_2019
. GPU DCV (formerly WSP) WorkSpaces with Windows 11 do not support Office installation.
The protocol. For more information, see Protocols for Amazon WorkSpaces.
Only available for WorkSpaces created with PCoIP bundles.
The Protocols
property is case sensitive. Ensure you use PCOIP
or WSP
.
Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn).
The protocol. For more information, see Protocols for Amazon WorkSpaces.
Only available for WorkSpaces created with PCoIP bundles.
The Protocols
property is case sensitive. Ensure you use PCOIP
or DCV
(formerly WSP).
Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn).