diff --git a/.changes/next-release/api-change-acm-23445.json b/.changes/next-release/api-change-acm-23445.json new file mode 100644 index 0000000000..647973af68 --- /dev/null +++ b/.changes/next-release/api-change-acm-23445.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``acm``", + "description": "add v2 smoke tests and smithy smokeTests trait for SDK testing." +} diff --git a/.changes/next-release/api-change-bedrockagent-86100.json b/.changes/next-release/api-change-bedrockagent-86100.json new file mode 100644 index 0000000000..5ef0feb893 --- /dev/null +++ b/.changes/next-release/api-change-bedrockagent-86100.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-agent``", + "description": "With this release, Knowledge bases for Bedrock adds support for Titan Text Embedding v2." +} diff --git a/.changes/next-release/api-change-bedrockruntime-15865.json b/.changes/next-release/api-change-bedrockruntime-15865.json new file mode 100644 index 0000000000..7ed150636d --- /dev/null +++ b/.changes/next-release/api-change-bedrockruntime-15865.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-runtime``", + "description": "This release adds Converse and ConverseStream APIs to Bedrock Runtime" +} diff --git a/.changes/next-release/api-change-cloudtrail-36744.json b/.changes/next-release/api-change-cloudtrail-36744.json new file mode 100644 index 0000000000..75fb0d4a77 --- /dev/null +++ b/.changes/next-release/api-change-cloudtrail-36744.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cloudtrail``", + "description": "CloudTrail Lake returns PartitionKeys in the GetEventDataStore API response. Events are grouped into partitions based on these keys for better query performance. For example, the calendarday key groups events by day, while combining the calendarday key with the hour key groups them by day and hour." +} diff --git a/.changes/next-release/api-change-connect-97102.json b/.changes/next-release/api-change-connect-97102.json new file mode 100644 index 0000000000..2634af1eeb --- /dev/null +++ b/.changes/next-release/api-change-connect-97102.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``connect``", + "description": "Adding associatedQueueIds as a SearchCriteria and response field to the SearchRoutingProfiles API" +} diff --git a/.changes/next-release/api-change-emrserverless-68781.json b/.changes/next-release/api-change-emrserverless-68781.json new file mode 100644 index 0000000000..23343c662a --- /dev/null +++ b/.changes/next-release/api-change-emrserverless-68781.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``emr-serverless``", + "description": "The release adds support for spark structured streaming." +} diff --git a/.changes/next-release/api-change-rds-71252.json b/.changes/next-release/api-change-rds-71252.json new file mode 100644 index 0000000000..c95b8b33ab --- /dev/null +++ b/.changes/next-release/api-change-rds-71252.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``rds``", + "description": "Updates Amazon RDS documentation for Aurora Postgres DBname." +} diff --git a/.changes/next-release/api-change-sagemaker-58609.json b/.changes/next-release/api-change-sagemaker-58609.json new file mode 100644 index 0000000000..f2cfb5cf7a --- /dev/null +++ b/.changes/next-release/api-change-sagemaker-58609.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``sagemaker``", + "description": "Adds Model Card information as a new component to Model Package. 
Autopilot launches algorithm selection for TimeSeries modality to generate AutoML candidates per algorithm." +} diff --git a/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json b/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json index 671234b4f5..e0738bc0b1 100644 --- a/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json +++ b/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/acm/2015-12-08/service-2.json b/botocore/data/acm/2015-12-08/service-2.json index c3fac7b087..e2d91e26a1 100644 --- a/botocore/data/acm/2015-12-08/service-2.json +++ b/botocore/data/acm/2015-12-08/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"acm", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"ACM", "serviceFullName":"AWS Certificate Manager", "serviceId":"ACM", diff --git a/botocore/data/bedrock-agent/2023-06-05/service-2.json b/botocore/data/bedrock-agent/2023-06-05/service-2.json index ec10d6386b..b76b1841ce 100644 --- a/botocore/data/bedrock-agent/2023-06-05/service-2.json +++ b/botocore/data/bedrock-agent/2023-06-05/service-2.json @@ -1546,6 +1546,16 @@ "min":20, "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" }, + "BedrockEmbeddingModelConfiguration":{ + "type":"structure", + "members":{ + "dimensions":{ + "shape":"Dimensions", + "documentation":"
The dimensions details for the vector configuration used on the Bedrock embeddings model.
" + } + }, + "documentation":"The vector configuration details for the Bedrock embeddings model.
" + }, "Boolean":{ "type":"boolean", "box":true @@ -2275,6 +2285,12 @@ "max":200, "min":1 }, + "Dimensions":{ + "type":"integer", + "box":true, + "max":4096, + "min":0 + }, "DisassociateAgentKnowledgeBaseRequest":{ "type":"structure", "required":[ @@ -2314,6 +2330,16 @@ "min":5, "pattern":"^DRAFT$" }, + "EmbeddingModelConfiguration":{ + "type":"structure", + "members":{ + "bedrockEmbeddingModelConfiguration":{ + "shape":"BedrockEmbeddingModelConfiguration", + "documentation":"The vector configuration details on the Bedrock embeddings model.
" + } + }, + "documentation":"The configuration details for the embeddings model.
" + }, "FailureReason":{ "type":"string", "max":2048, @@ -4653,6 +4679,10 @@ "embeddingModelArn":{ "shape":"BedrockEmbeddingModelArn", "documentation":"The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.
" + }, + "embeddingModelConfiguration":{ + "shape":"EmbeddingModelConfiguration", + "documentation":"The embeddings model configuration details for the vector model used in Knowledge Base.
" } }, "documentation":"Contains details about the model used to create vector embeddings for the knowledge base.
" diff --git a/botocore/data/bedrock-runtime/2023-09-30/service-2.json b/botocore/data/bedrock-runtime/2023-09-30/service-2.json index 5d545913e0..97b0648310 100644 --- a/botocore/data/bedrock-runtime/2023-09-30/service-2.json +++ b/botocore/data/bedrock-runtime/2023-09-30/service-2.json @@ -12,6 +12,48 @@ "uid":"bedrock-runtime-2023-09-30" }, "operations":{ + "Converse":{ + "name":"Converse", + "http":{ + "method":"POST", + "requestUri":"/model/{modelId}/converse", + "responseCode":200 + }, + "input":{"shape":"ConverseRequest"}, + "output":{"shape":"ConverseResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ModelTimeoutException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ModelNotReadyException"}, + {"shape":"ModelErrorException"} + ], + "documentation":"Sends messages to the specified Amazon Bedrock model. Converse
provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For more information, see Run inference in the Bedrock User Guide.
This operation requires permission for the bedrock:InvokeModel
action.
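As a quick illustration of the request and response shapes defined below, a boto3 call to the new operation might look like this sketch; the model ID is an assumed example, not something this diff pins down.

import boto3

client = boto3.client("bedrock-runtime")

# One user turn; Converse uses the same request shape for every supported model.
response = client.converse(
    modelId="anthropic.claude-3-sonnet-20240229-v1:0",  # assumed example ID
    system=[{"text": "You are a terse assistant."}],
    messages=[{"role": "user", "content": [{"text": "Name three AWS regions."}]}],
    inferenceConfig={"maxTokens": 256, "temperature": 0.5},
)

# The reply is a Message in the output union; usage carries the token counts.
print(response["output"]["message"]["content"][0]["text"])
print(response["usage"]["totalTokens"])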
Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream
provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For more information, see Run inference in the Bedrock User Guide.
To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported
field in the response.
For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide.
This operation requires permission for the bedrock:InvokeModelWithResponseStream
action.
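A minimal streaming sketch under the same assumptions (example model ID): iterate the response's event stream and stitch the text deltas together.

import boto3

client = boto3.client("bedrock-runtime")

response = client.converse_stream(
    modelId="anthropic.claude-3-sonnet-20240229-v1:0",  # assumed example ID
    messages=[{"role": "user", "content": [{"text": "Write one haiku."}]}],
)

# Each event in the stream is a single-key dict matching ConverseStreamOutput.
for event in response["stream"]:
    if "contentBlockDelta" in event:
        print(event["contentBlockDelta"]["delta"].get("text", ""), end="")
    elif "messageStop" in event:
        print("\nStop reason:", event["messageStop"]["stopReason"])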
The model must request at least one tool (no text is generated).
" + }, + "AutoToolChoice":{ + "type":"structure", + "members":{ + }, + "documentation":"The Model automatically decides if a tool should be called or to whether to generate text instead.
" + }, "Body":{ "type":"blob", "max":25000000, "min":0, "sensitive":true }, + "ContentBlock":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"Text to include in the message.
" + }, + "image":{ + "shape":"ImageBlock", + "documentation":"Image to include in the message.
This field is only supported by Anthropic Claude 3 models.
Information about a tool use request from a model.
" + }, + "toolResult":{ + "shape":"ToolResultBlock", + "documentation":"The result for a tool request that a model makes.
" + } + }, + "documentation":"A block of content for a message.
", + "union":true + }, + "ContentBlockDelta":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"The content text.
" + }, + "toolUse":{ + "shape":"ToolUseBlockDelta", + "documentation":"Information about a tool that the model is requesting to use.
" + } + }, + "documentation":"A bock of content in a streaming response.
", + "union":true + }, + "ContentBlockDeltaEvent":{ + "type":"structure", + "required":[ + "delta", + "contentBlockIndex" + ], + "members":{ + "delta":{ + "shape":"ContentBlockDelta", + "documentation":"The delta for a content block delta event.
" + }, + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"The block index for a content block delta event.
" + } + }, + "documentation":"The content block delta event.
", + "event":true + }, + "ContentBlockStart":{ + "type":"structure", + "members":{ + "toolUse":{ + "shape":"ToolUseBlockStart", + "documentation":"Information about a tool that the model is requesting to use.
" + } + }, + "documentation":"Content block start information.
", + "union":true + }, + "ContentBlockStartEvent":{ + "type":"structure", + "required":[ + "start", + "contentBlockIndex" + ], + "members":{ + "start":{ + "shape":"ContentBlockStart", + "documentation":"Start information about a content block start event.
" + }, + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"The index for a content block start event.
" + } + }, + "documentation":"Content block start event.
", + "event":true + }, + "ContentBlockStopEvent":{ + "type":"structure", + "required":["contentBlockIndex"], + "members":{ + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"The index for a content block.
" + } + }, + "documentation":"A content block stop event.
", + "event":true + }, + "ContentBlocks":{ + "type":"list", + "member":{"shape":"ContentBlock"} + }, + "ConversationRole":{ + "type":"string", + "enum":[ + "user", + "assistant" + ] + }, + "ConversationalModelId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)" + }, + "ConverseMetrics":{ + "type":"structure", + "required":["latencyMs"], + "members":{ + "latencyMs":{ + "shape":"Long", + "documentation":"The latency of the call to Converse
, in milliseconds.
Metrics for a call to Converse.
" + }, + "ConverseOutput":{ + "type":"structure", + "members":{ + "message":{ + "shape":"Message", + "documentation":"The message that the model generates.
" + } + }, + "documentation":"The output from a call to Converse.
", + "union":true + }, + "ConverseRequest":{ + "type":"structure", + "required":[ + "modelId", + "messages" + ], + "members":{ + "modelId":{ + "shape":"ConversationalModelId", + "documentation":"The identifier for the model that you want to call.
The modelId
to provide depends on the type of model that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
The messages that you want to send to the model.
" + }, + "system":{ + "shape":"SystemContentBlocks", + "documentation":"A system prompt to pass to the model.
" + }, + "inferenceConfig":{ + "shape":"InferenceConfiguration", + "documentation":"Inference parameters to pass to the model. Converse
supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields
request field.
Configuration information for the tools that the model can use when generating a response.
This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.
Additional inference parameters that the model supports, beyond the base set of inference parameters that Converse
supports in the inferenceConfig
field. For more information, see Model parameters.
Additional model parameters field paths to return in the response. Converse
returns the requested fields as a JSON Pointer object in the additionalModelResultFields
field. The following is example JSON for additionalModelResponseFieldPaths
.
[ \"/stop_sequence\" ]
For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation.
Converse
rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400
error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse
.
The result from the call to Converse
.
The reason why the model stopped generating output.
" + }, + "usage":{ + "shape":"TokenUsage", + "documentation":"The total number of tokens used in the call to Converse
. The total includes the tokens input to the model and the tokens generated by the model.
Metrics for the call to Converse
.
Additional fields in the response that are unique to the model.
" + } + } + }, + "ConverseStreamMetadataEvent":{ + "type":"structure", + "required":[ + "usage", + "metrics" + ], + "members":{ + "usage":{ + "shape":"TokenUsage", + "documentation":"Usage information for the conversation stream event.
" + }, + "metrics":{ + "shape":"ConverseStreamMetrics", + "documentation":"The metrics for the conversation stream metadata event.
" + } + }, + "documentation":"A conversation stream metadata event.
", + "event":true + }, + "ConverseStreamMetrics":{ + "type":"structure", + "required":["latencyMs"], + "members":{ + "latencyMs":{ + "shape":"Long", + "documentation":"The latency for the streaming request, in milliseconds.
" + } + }, + "documentation":"Metrics for the stream.
" + }, + "ConverseStreamOutput":{ + "type":"structure", + "members":{ + "messageStart":{ + "shape":"MessageStartEvent", + "documentation":"Message start information.
" + }, + "contentBlockStart":{ + "shape":"ContentBlockStartEvent", + "documentation":"Start information for a content block.
" + }, + "contentBlockDelta":{ + "shape":"ContentBlockDeltaEvent", + "documentation":"The messages output content block delta.
" + }, + "contentBlockStop":{ + "shape":"ContentBlockStopEvent", + "documentation":"Stop information for a content block.
" + }, + "messageStop":{ + "shape":"MessageStopEvent", + "documentation":"Message stop information.
" + }, + "metadata":{ + "shape":"ConverseStreamMetadataEvent", + "documentation":"Metadata for the converse output stream.
" + }, + "internalServerException":{ + "shape":"InternalServerException", + "documentation":"An internal server error occurred. Retry your request.
" + }, + "modelStreamErrorException":{ + "shape":"ModelStreamErrorException", + "documentation":"A streaming error occurred. Retry your request.
" + }, + "validationException":{ + "shape":"ValidationException", + "documentation":"Input validation failed. Check your request parameters and retry the request.
" + }, + "throttlingException":{ + "shape":"ThrottlingException", + "documentation":"The number of requests exceeds the limit. Resubmit your request later.
" + } + }, + "documentation":"The messages output stream
", + "eventstream":true + }, + "ConverseStreamRequest":{ + "type":"structure", + "required":[ + "modelId", + "messages" + ], + "members":{ + "modelId":{ + "shape":"ConversationalModelId", + "documentation":"The ID for the model.
The modelId
to provide depends on the type of model that you use:
If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide.
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide.
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
The messages that you want to send to the model.
" + }, + "system":{ + "shape":"SystemContentBlocks", + "documentation":"A system prompt to send to the model.
" + }, + "inferenceConfig":{ + "shape":"InferenceConfiguration", + "documentation":"Inference parameters to pass to the model. ConverseStream
supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields
request field.
Configuration information for the tools that the model can use when generating a response.
This field is only supported by Anthropic Claude 3 models.
Additional inference parameters that the model supports, beyond the base set of inference parameters that ConverseStream
supports in the inferenceConfig
field.
Additional model parameters field paths to return in the response. ConverseStream
returns the requested fields as a JSON Pointer object in the additionalModelResultFields
field. The following is example JSON for additionalModelResponseFieldPaths
.
[ \"/stop_sequence\" ]
For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation.
ConverseStream
rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400
error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by ConverseStream
.
The output stream that the model generated.
" + } + }, + "payload":"stream" + }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, "GuardrailIdentifier":{ "type":"string", "max":2048, @@ -87,6 +503,93 @@ "type":"string", "pattern":"(([1-9][0-9]{0,7})|(DRAFT))" }, + "ImageBlock":{ + "type":"structure", + "required":[ + "format", + "source" + ], + "members":{ + "format":{ + "shape":"ImageFormat", + "documentation":"The format of the image.
" + }, + "source":{ + "shape":"ImageSource", + "documentation":"The source for the image.
" + } + }, + "documentation":"Image content for a message.
" + }, + "ImageFormat":{ + "type":"string", + "enum":[ + "png", + "jpeg", + "gif", + "webp" + ] + }, + "ImageSource":{ + "type":"structure", + "members":{ + "bytes":{ + "shape":"ImageSourceBytesBlob", + "documentation":"The raw image bytes for the image. If you use an AWS SDK, you don't need to base64 encode the image bytes.
" + } + }, + "documentation":"The source for an image.
", + "union":true + }, + "ImageSourceBytesBlob":{ + "type":"blob", + "min":1 + }, + "InferenceConfiguration":{ + "type":"structure", + "members":{ + "maxTokens":{ + "shape":"InferenceConfigurationMaxTokensInteger", + "documentation":"The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value for the model that you are using. For more information, see Inference parameters for foundatio{ \"messages\": [ { \"role\": \"user\", \"content\": [ { \"text\": \"what's the weather in Queens, NY and Austin, TX?\" } ] }, { \"role\": \"assistant\", \"content\": [ { \"toolUse\": { \"toolUseId\": \"1\", \"name\": \"get_weather\", \"input\": { \"city\": \"Queens\", \"state\": \"NY\" } } }, { \"toolUse\": { \"toolUseId\": \"2\", \"name\": \"get_weather\", \"input\": { \"city\": \"Austin\", \"state\": \"TX\" } } } ] }, { \"role\": \"user\", \"content\": [ { \"toolResult\": { \"toolUseId\": \"2\", \"content\": [ { \"json\": { \"weather\": \"40\" } } ] } }, { \"text\": \"...\" }, { \"toolResult\": { \"toolUseId\": \"1\", \"content\": [ { \"text\": \"result text\" } ] } } ] } ], \"toolConfig\": { \"tools\": [ { \"name\": \"get_weather\", \"description\": \"Get weather\", \"inputSchema\": { \"type\": \"object\", \"properties\": { \"city\": { \"type\": \"string\", \"description\": \"City of location\" }, \"state\": { \"type\": \"string\", \"description\": \"State of location\" } }, \"required\": [\"city\", \"state\"] } } ] } } n models.
" + }, + "temperature":{ + "shape":"InferenceConfigurationTemperatureFloat", + "documentation":"The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.
The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models.
" + }, + "topP":{ + "shape":"InferenceConfigurationTopPFloat", + "documentation":"The percentage of most-likely candidates that the model considers for the next token. For example, if you choose a value of 0.8 for topP
, the model selects from the top 80% of the probability distribution of tokens that could be next in the sequence.
The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models.
" + }, + "stopSequences":{ + "shape":"InferenceConfigurationStopSequencesList", + "documentation":"A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.
" + } + }, + "documentation":"Base inference parameters to pass to a model in a call to Converse or ConverseStream. For more information, see Inference parameters for foundation models.
If you need to pass additional parameters that the model supports, use the additionalModelRequestFields
request field in the call to Converse
or ConverseStream
. For more information, see Model parameters.
The role that the message plays in the message.
" + }, + "content":{ + "shape":"ContentBlocks", + "documentation":"The message content.
" + } + }, + "documentation":"A message in the Message field. Use to send a message in a call to Converse.
" + }, + "MessageStartEvent":{ + "type":"structure", + "required":["role"], + "members":{ + "role":{ + "shape":"ConversationRole", + "documentation":"The role for the message.
" + } + }, + "documentation":"The start of a message.
", + "event":true + }, + "MessageStopEvent":{ + "type":"structure", + "required":["stopReason"], + "members":{ + "stopReason":{ + "shape":"StopReason", + "documentation":"The reason why the model stopped generating output.
" + }, + "additionalModelResponseFields":{ + "shape":"Document", + "documentation":"The additional model response fields.
" + } + }, + "documentation":"The stop event for a message.
", + "event":true + }, + "Messages":{ + "type":"list", + "member":{"shape":"Message"} + }, "MimeType":{"type":"string"}, "ModelErrorException":{ "type":"structure", @@ -312,6 +869,15 @@ "type":"string", "pattern":"[\\s\\S]*" }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, + "NonNegativeInteger":{ + "type":"integer", + "box":true, + "min":0 + }, "PartBody":{ "type":"blob", "max":1000000, @@ -385,12 +951,49 @@ }, "exception":true }, + "SpecificToolChoice":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"ToolName", + "documentation":"The name of the tool that the model must request.
" + } + }, + "documentation":"The model must request a specific tool.
This field is only supported by Anthropic Claude 3 models.
A system prompt for the model.
" + } + }, + "documentation":"A system content block
", + "union":true + }, + "SystemContentBlocks":{ + "type":"list", + "member":{"shape":"SystemContentBlock"} + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -403,6 +1006,243 @@ }, "exception":true }, + "TokenUsage":{ + "type":"structure", + "required":[ + "inputTokens", + "outputTokens", + "totalTokens" + ], + "members":{ + "inputTokens":{ + "shape":"TokenUsageInputTokensInteger", + "documentation":"The number of tokens sent in the request to the model.
" + }, + "outputTokens":{ + "shape":"TokenUsageOutputTokensInteger", + "documentation":"The number of tokens that the model generated for the request.
" + }, + "totalTokens":{ + "shape":"TokenUsageTotalTokensInteger", + "documentation":"The total of input tokens and tokens generated by the model.
" + } + }, + "documentation":"The tokens used in a message API inference call.
" + }, + "TokenUsageInputTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "TokenUsageOutputTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "TokenUsageTotalTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "Tool":{ + "type":"structure", + "members":{ + "toolSpec":{ + "shape":"ToolSpecification", + "documentation":"The specfication for the tool.
" + } + }, + "documentation":"Information about a tool that you can use with the Converse API.
", + "union":true + }, + "ToolChoice":{ + "type":"structure", + "members":{ + "auto":{ + "shape":"AutoToolChoice", + "documentation":"The Model automatically decides if a tool should be called or to whether to generate text instead.
" + }, + "any":{ + "shape":"AnyToolChoice", + "documentation":"The model must request at least one tool (no text is generated).
" + }, + "tool":{ + "shape":"SpecificToolChoice", + "documentation":"The Model must request the specified tool.
" + } + }, + "documentation":"Forces a model to use a tool.
", + "union":true + }, + "ToolConfiguration":{ + "type":"structure", + "required":["tools"], + "members":{ + "tools":{ + "shape":"ToolConfigurationToolsList", + "documentation":"An array of tools that you want to pass to a model.
" + }, + "toolChoice":{ + "shape":"ToolChoice", + "documentation":"If supported by model, forces the model to request a tool.
" + } + }, + "documentation":"Configuration information for the tools that you pass to a model.
This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.
The JSON schema for the tool. For more information, see JSON Schema Reference.
" + } + }, + "documentation":"The schema for the tool. The top level schema type must be object
.
The ID of the tool request that this is the result for.
" + }, + "content":{ + "shape":"ToolResultContentBlocks", + "documentation":"The content for tool result content block.
" + }, + "status":{ + "shape":"ToolResultStatus", + "documentation":"The status for the tool result content block.
This field is only supported Anthropic Claude 3 models.
A tool result block that contains the results for a tool request that the model previously made.
" + }, + "ToolResultContentBlock":{ + "type":"structure", + "members":{ + "json":{ + "shape":"Document", + "documentation":"A tool result that is JSON format data.
" + }, + "text":{ + "shape":"String", + "documentation":"A tool result that is text.
" + }, + "image":{ + "shape":"ImageBlock", + "documentation":"A tool result that is an image.
This field is only supported by Anthropic Claude 3 models.
The tool result content block.
", + "union":true + }, + "ToolResultContentBlocks":{ + "type":"list", + "member":{"shape":"ToolResultContentBlock"} + }, + "ToolResultStatus":{ + "type":"string", + "enum":[ + "success", + "error" + ] + }, + "ToolSpecification":{ + "type":"structure", + "required":[ + "name", + "inputSchema" + ], + "members":{ + "name":{ + "shape":"ToolName", + "documentation":"The name for the tool.
" + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"The description for the tool.
" + }, + "inputSchema":{ + "shape":"ToolInputSchema", + "documentation":"The input schema for the tool in JSON format.
" + } + }, + "documentation":"The specification for the tool.
" + }, + "ToolUseBlock":{ + "type":"structure", + "required":[ + "toolUseId", + "name", + "input" + ], + "members":{ + "toolUseId":{ + "shape":"ToolUseId", + "documentation":"The ID for the tool request.
" + }, + "name":{ + "shape":"ToolName", + "documentation":"The name of the tool that the model wants to use.
" + }, + "input":{ + "shape":"Document", + "documentation":"The input to pass to the tool.
" + } + }, + "documentation":"A tool use content block. Contains information about a tool that the model is requesting be run., The model uses the result from the tool to generate a response.
" + }, + "ToolUseBlockDelta":{ + "type":"structure", + "required":["input"], + "members":{ + "input":{ + "shape":"String", + "documentation":"The input for a requested tool.
" + } + }, + "documentation":"The delta for a tool use block.
" + }, + "ToolUseBlockStart":{ + "type":"structure", + "required":[ + "toolUseId", + "name" + ], + "members":{ + "toolUseId":{ + "shape":"ToolUseId", + "documentation":"The ID for the tool request.
" + }, + "name":{ + "shape":"ToolName", + "documentation":"The name of the tool that the model is requesting to use.
" + } + }, + "documentation":"The start of a tool use block.
" + }, + "ToolUseId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, "Trace":{ "type":"string", "enum":[ diff --git a/botocore/data/cloudtrail/2013-11-01/service-2.json b/botocore/data/cloudtrail/2013-11-01/service-2.json index 332d2b0640..cadb309048 100644 --- a/botocore/data/cloudtrail/2013-11-01/service-2.json +++ b/botocore/data/cloudtrail/2013-11-01/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"cloudtrail", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"CloudTrail", "serviceFullName":"AWS CloudTrail", "serviceId":"CloudTrail", @@ -746,7 +747,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"InsufficientDependencyServiceAccessPermissionException"} ], - "documentation":"Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events for trails in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.
When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.
Example
You create an event selector for a trail and specify that you want write-only events.
The EC2 GetConsoleOutput
and RunInstances
API operations occur in your account.
CloudTrail evaluates whether the events match your event selectors.
The RunInstances
is a write-only event and it matches your event selector. The trail logs the event.
The GetConsoleOutput
is a read-only event that doesn't match your event selector. The trail doesn't log the event.
The PutEventSelectors
operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException
exception is thrown.
You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide.
You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors
or EventSelectors
, but not both. If you apply AdvancedEventSelectors
to a trail, any existing EventSelectors
are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.
Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.
When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.
Example
You create an event selector for a trail and specify that you want write-only events.
The EC2 GetConsoleOutput
and RunInstances
API operations occur in your account.
CloudTrail evaluates whether the events match your event selectors.
The RunInstances
is a write-only event and it matches your event selector. The trail logs the event.
The GetConsoleOutput
is a read-only event that doesn't match your event selector. The trail doesn't log the event.
The PutEventSelectors
operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException
exception is thrown.
You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide.
You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors
or EventSelectors
, but not both. If you apply AdvancedEventSelectors
to a trail, any existing EventSelectors
are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.
Starts an import of logged trail events from a source S3 bucket to a destination event data store. By default, CloudTrail only imports events contained in the S3 bucket's CloudTrail
prefix and the prefixes inside the CloudTrail
prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri
. For more considerations about importing trail events, see Considerations.
When you start a new import, the Destinations
and ImportSource
parameters are required. Before starting a new import, disable any access control lists (ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see Controlling ownership of objects and disabling ACLs for your bucket.
When you retry an import, the ImportID
parameter is required.
If the destination event data store is for an organization, you must use the management account to import trail events. You cannot use the delegated administrator account for the organization.
Starts an import of logged trail events from a source S3 bucket to a destination event data store. By default, CloudTrail only imports events contained in the S3 bucket's CloudTrail
prefix and the prefixes inside the CloudTrail
prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri
. For more considerations about importing trail events, see Considerations for copying trail events in the CloudTrail User Guide.
When you start a new import, the Destinations
and ImportSource
parameters are required. Before starting a new import, disable any access control lists (ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see Controlling ownership of objects and disabling ACLs for your bucket.
When you retry an import, the ImportID
parameter is required.
If the destination event data store is for an organization, you must use the management account to import trail events. You cannot use the delegated administrator account for the organization.
A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.
For CloudTrail management events, supported fields include readOnly
, eventCategory
, and eventSource
.
For CloudTrail data events, supported fields include readOnly
, eventCategory
, eventName
, resources.type
, and resources.ARN
.
For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory
.
readOnly
- Optional. Can be set to Equals
a value of true
or false
. If you do not add this field, CloudTrail logs both read
and write
events. A value of true
logs only read
events. A value of false
logs only write
events.
eventSource
- For filtering management events only. This can be set to NotEquals
kms.amazonaws.com
or NotEquals
rdsdata.amazonaws.com
.
eventName
- Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket
or GetSnapshotBlock
. You can have multiple values for this field, separated by commas.
eventCategory
- This is required and must be set to Equals
.
For CloudTrail management events, the value must be Management
.
For CloudTrail data events, the value must be Data
.
The following are used only for event data stores:
For CloudTrail Insights events, the value must be Insight
.
For Config configuration items, the value must be ConfigurationItem
.
For Audit Manager evidence, the value must be Evidence
.
For non-Amazon Web Services events, the value must be ActivityAuditLog
.
resources.type
- This field is required for CloudTrail data events. resources.type
can only use the Equals
operator, and the value can be one of the following:
AWS::DynamoDB::Table
AWS::Lambda::Function
AWS::S3::Object
AWS::AppConfig::Configuration
AWS::B2BI::Transformer
AWS::Bedrock::AgentAlias
AWS::Bedrock::KnowledgeBase
AWS::Cassandra::Table
AWS::CloudFront::KeyValueStore
AWS::CloudTrail::Channel
AWS::CodeWhisperer::Customization
AWS::CodeWhisperer::Profile
AWS::Cognito::IdentityPool
AWS::DynamoDB::Stream
AWS::EC2::Snapshot
AWS::EMRWAL::Workspace
AWS::FinSpace::Environment
AWS::Glue::Table
AWS::GreengrassV2::ComponentVersion
AWS::GreengrassV2::Deployment
AWS::GuardDuty::Detector
AWS::IoT::Certificate
AWS::IoT::Thing
AWS::IoTSiteWise::Asset
AWS::IoTSiteWise::TimeSeries
AWS::IoTTwinMaker::Entity
AWS::IoTTwinMaker::Workspace
AWS::KendraRanking::ExecutionPlan
AWS::KinesisVideo::Stream
AWS::ManagedBlockchain::Network
AWS::ManagedBlockchain::Node
AWS::MedicalImaging::Datastore
AWS::NeptuneGraph::Graph
AWS::PCAConnectorAD::Connector
AWS::QBusiness::Application
AWS::QBusiness::DataSource
AWS::QBusiness::Index
AWS::QBusiness::WebExperience
AWS::RDS::DBCluster
AWS::S3::AccessPoint
AWS::S3ObjectLambda::AccessPoint
AWS::S3Outposts::Object
AWS::SageMaker::Endpoint
AWS::SageMaker::ExperimentTrialComponent
AWS::SageMaker::FeatureGroup
AWS::ServiceDiscovery::Namespace
AWS::ServiceDiscovery::Service
AWS::SCN::Instance
AWS::SNS::PlatformEndpoint
AWS::SNS::Topic
AWS::SWF::Domain
AWS::SQS::Queue
AWS::SSMMessages::ControlChannel
AWS::ThinClient::Device
AWS::ThinClient::Environment
AWS::Timestream::Database
AWS::Timestream::Table
AWS::VerifiedPermissions::PolicyStore
You can have only one resources.type
field per selector. To log data events on more than one resource type, add another selector.
resources.ARN
- You can use any operator with resources.ARN
, but if you use Equals
or NotEquals
, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object
, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith
operator, and include only the bucket ARN as the matching value.
The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.
arn:<partition>:s3:::<bucket_name>/
arn:<partition>:s3:::<bucket_name>/<object_path>/
When resources.type equals AWS::DynamoDB::Table
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>
When resources.type equals AWS::Lambda::Function
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>
When resources.type equals AWS::AppConfig::Configuration
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:appconfig:<region>:<account_ID>:application/<application_ID>/environment/<environment_ID>/configuration/<configuration_profile_ID>
When resources.type equals AWS::B2BI::Transformer
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:b2bi:<region>:<account_ID>:transformer/<transformer_ID>
When resources.type equals AWS::Bedrock::AgentAlias
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:bedrock:<region>:<account_ID>:agent-alias/<agent_ID>/<alias_ID>
When resources.type equals AWS::Bedrock::KnowledgeBase
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:bedrock:<region>:<account_ID>:knowledge-base/<knowledge_base_ID>
When resources.type equals AWS::Cassandra::Table
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:cassandra:<region>:<account_ID>:/keyspace/<keyspace_name>/table/<table_name>
When resources.type equals AWS::CloudFront::KeyValueStore
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:cloudfront:<region>:<account_ID>:key-value-store/<KVS_name>
When resources.type equals AWS::CloudTrail::Channel
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>
When resources.type equals AWS::CodeWhisperer::Customization
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:codewhisperer:<region>:<account_ID>:customization/<customization_ID>
When resources.type equals AWS::CodeWhisperer::Profile
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:codewhisperer:<region>:<account_ID>:profile/<profile_ID>
When resources.type equals AWS::Cognito::IdentityPool
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>
When resources.type
equals AWS::DynamoDB::Stream
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>
When resources.type
equals AWS::EC2::Snapshot
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>
When resources.type
equals AWS::EMRWAL::Workspace
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:emrwal:<region>:<account_ID>:workspace/<workspace_name>
When resources.type
equals AWS::FinSpace::Environment
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>
When resources.type
equals AWS::Glue::Table
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>
When resources.type
equals AWS::GreengrassV2::ComponentVersion
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:greengrass:<region>:<account_ID>:components/<component_name>
When resources.type
equals AWS::GreengrassV2::Deployment
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:greengrass:<region>:<account_ID>:deployments/<deployment_ID
When resources.type
equals AWS::GuardDuty::Detector
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>
When resources.type
equals AWS::IoT::Certificate
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iot:<region>:<account_ID>:cert/<certificate_ID>
When resources.type
equals AWS::IoT::Thing
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iot:<region>:<account_ID>:thing/<thing_ID>
When resources.type
equals AWS::IoTSiteWise::Asset
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iotsitewise:<region>:<account_ID>:asset/<asset_ID>
When resources.type
equals AWS::IoTSiteWise::TimeSeries
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iotsitewise:<region>:<account_ID>:timeseries/<timeseries_ID>
When resources.type
equals AWS::IoTTwinMaker::Entity
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iottwinmaker:<region>:<account_ID>:workspace/<workspace_ID>/entity/<entity_ID>
When resources.type
equals AWS::IoTTwinMaker::Workspace
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iottwinmaker:<region>:<account_ID>:workspace/<workspace_ID>
When resources.type
equals AWS::KendraRanking::ExecutionPlan
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>
When resources.type
equals AWS::KinesisVideo::Stream
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:kinesisvideo:<region>:<account_ID>:stream/<stream_name>/<creation_time>
When resources.type
equals AWS::ManagedBlockchain::Network
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:managedblockchain:::networks/<network_name>
When resources.type
equals AWS::ManagedBlockchain::Node
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>
When resources.type
equals AWS::MedicalImaging::Datastore
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:medical-imaging:<region>:<account_ID>:datastore/<data_store_ID>
When resources.type
equals AWS::NeptuneGraph::Graph
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:neptune-graph:<region>:<account_ID>:graph/<graph_ID>
When resources.type
equals AWS::PCAConnectorAD::Connector
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:pca-connector-ad:<region>:<account_ID>:connector/<connector_ID>
When resources.type
equals AWS::QBusiness::Application
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>
When resources.type
equals AWS::QBusiness::DataSource
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/index/<index_ID>/data-source/<datasource_ID>
When resources.type
equals AWS::QBusiness::Index
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/index/<index_ID>
When resources.type
equals AWS::QBusiness::WebExperience
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/web-experience/<web_experience_ID>
When resources.type
equals AWS::RDS::DBCluster
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:rds:<region>:<account_ID>:cluster/<cluster_name>
When resources.type
equals AWS::S3::AccessPoint
, and the operator is set to Equals
or NotEquals
, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith
or NotStartsWith
operators.
arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>
arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>
When resources.type
equals AWS::S3ObjectLambda::AccessPoint
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>
When resources.type
equals AWS::S3Outposts::Object
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>
When resources.type
equals AWS::SageMaker::Endpoint
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sagemaker:<region>:<account_ID>:endpoint/<endpoint_name>
When resources.type
equals AWS::SageMaker::ExperimentTrialComponent
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>
When resources.type
equals AWS::SageMaker::FeatureGroup
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>
When resources.type
equals AWS::SCN::Instance
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:scn:<region>:<account_ID>:instance/<instance_ID>
When resources.type
equals AWS::ServiceDiscovery::Namespace
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:servicediscovery:<region>:<account_ID>:namespace/<namespace_ID>
When resources.type
equals AWS::ServiceDiscovery::Service
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:servicediscovery:<region>:<account_ID>:service/<service_ID>
When resources.type
equals AWS::SNS::PlatformEndpoint
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sns:<region>:<account_ID>:endpoint/<endpoint_type>/<endpoint_name>/<endpoint_ID>
When resources.type
equals AWS::SNS::Topic
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sns:<region>:<account_ID>:<topic_name>
When resources.type
equals AWS::SWF::Domain
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:swf:<region>:<account_ID>:domain/<domain_name>
When resources.type
equals AWS::SQS::Queue
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sqs:<region>:<account_ID>:<queue_name>
When resources.type
equals AWS::SSMMessages::ControlChannel
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:ssmmessages:<region>:<account_ID>:control-channel/<channel_ID>
When resources.type
equals AWS::ThinClient::Device
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:thinclient:<region>:<account_ID>:device/<device_ID>
When resources.type
equals AWS::ThinClient::Environment
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:thinclient:<region>:<account_ID>:environment/<environment_ID>
When resources.type
equals AWS::Timestream::Database
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:timestream:<region>:<account_ID>:database/<database_name>
When resources.type
equals AWS::Timestream::Table
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:timestream:<region>:<account_ID>:database/<database_name>/table/<table_name>
When resources.type equals AWS::VerifiedPermissions::PolicyStore
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:verifiedpermissions:<region>:<account_ID>:policy-store/<policy_store_UUID>
A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events; filtering is not supported.
For CloudTrail management events, supported fields include readOnly
, eventCategory
, and eventSource
.
For CloudTrail data events, supported fields include readOnly
, eventCategory
, eventName
, resources.type
, and resources.ARN
.
For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory
.
readOnly
- Optional. Can be set to Equals
a value of true
or false
. If you do not add this field, CloudTrail logs both read
and write
events. A value of true
logs only read
events. A value of false
logs only write
events.
eventSource
- For filtering management events only. This can be set to NotEquals
kms.amazonaws.com
or NotEquals
rdsdata.amazonaws.com
.
eventName
- Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket
or GetSnapshotBlock
. You can have multiple values for this field, separated by commas.
eventCategory
- This is required and must be set to Equals
.
For CloudTrail management events, the value must be Management
.
For CloudTrail data events, the value must be Data
.
The following are used only for event data stores:
For CloudTrail Insights events, the value must be Insight
.
For Config configuration items, the value must be ConfigurationItem
.
For Audit Manager evidence, the value must be Evidence
.
For non-Amazon Web Services events, the value must be ActivityAuditLog
.
resources.type
- This field is required for CloudTrail data events. resources.type
can only use the Equals
operator, and the value can be one of the following:
AWS::DynamoDB::Table
AWS::Lambda::Function
AWS::S3::Object
AWS::AppConfig::Configuration
AWS::B2BI::Transformer
AWS::Bedrock::AgentAlias
AWS::Bedrock::KnowledgeBase
AWS::Cassandra::Table
AWS::CloudFront::KeyValueStore
AWS::CloudTrail::Channel
AWS::CodeWhisperer::Customization
AWS::CodeWhisperer::Profile
AWS::Cognito::IdentityPool
AWS::DynamoDB::Stream
AWS::EC2::Snapshot
AWS::EMRWAL::Workspace
AWS::FinSpace::Environment
AWS::Glue::Table
AWS::GreengrassV2::ComponentVersion
AWS::GreengrassV2::Deployment
AWS::GuardDuty::Detector
AWS::IoT::Certificate
AWS::IoT::Thing
AWS::IoTSiteWise::Asset
AWS::IoTSiteWise::TimeSeries
AWS::IoTTwinMaker::Entity
AWS::IoTTwinMaker::Workspace
AWS::KendraRanking::ExecutionPlan
AWS::KinesisVideo::Stream
AWS::ManagedBlockchain::Network
AWS::ManagedBlockchain::Node
AWS::MedicalImaging::Datastore
AWS::NeptuneGraph::Graph
AWS::PCAConnectorAD::Connector
AWS::QApps:QApp
AWS::QBusiness::Application
AWS::QBusiness::DataSource
AWS::QBusiness::Index
AWS::QBusiness::WebExperience
AWS::RDS::DBCluster
AWS::S3::AccessPoint
AWS::S3ObjectLambda::AccessPoint
AWS::S3Outposts::Object
AWS::SageMaker::Endpoint
AWS::SageMaker::ExperimentTrialComponent
AWS::SageMaker::FeatureGroup
AWS::ServiceDiscovery::Namespace
AWS::ServiceDiscovery::Service
AWS::SCN::Instance
AWS::SNS::PlatformEndpoint
AWS::SNS::Topic
AWS::SQS::Queue
AWS::SSM::ManagedNode
AWS::SSMMessages::ControlChannel
AWS::SWF::Domain
AWS::ThinClient::Device
AWS::ThinClient::Environment
AWS::Timestream::Database
AWS::Timestream::Table
AWS::VerifiedPermissions::PolicyStore
AWS::XRay::Trace
You can have only one resources.type
field per selector. To log data events on more than one resource type, add another selector.
resources.ARN
- You can use any operator with resources.ARN
, but if you use Equals
or NotEquals
, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type.
You can't use the resources.ARN
field to filter resource types that do not have ARNs.
The resources.ARN
field can be set to one of the following.
If resources.type equals AWS::S3::Object
, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith
operator, and include only the bucket ARN as the matching value.
The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.
arn:<partition>:s3:::<bucket_name>/
arn:<partition>:s3:::<bucket_name>/<object_path>/
When resources.type equals AWS::DynamoDB::Table
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>
When resources.type equals AWS::Lambda::Function
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>
When resources.type equals AWS::AppConfig::Configuration
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:appconfig:<region>:<account_ID>:application/<application_ID>/environment/<environment_ID>/configuration/<configuration_profile_ID>
When resources.type equals AWS::B2BI::Transformer
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:b2bi:<region>:<account_ID>:transformer/<transformer_ID>
When resources.type equals AWS::Bedrock::AgentAlias
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:bedrock:<region>:<account_ID>:agent-alias/<agent_ID>/<alias_ID>
When resources.type equals AWS::Bedrock::KnowledgeBase
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:bedrock:<region>:<account_ID>:knowledge-base/<knowledge_base_ID>
When resources.type equals AWS::Cassandra::Table
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:cassandra:<region>:<account_ID>:/keyspace/<keyspace_name>/table/<table_name>
When resources.type equals AWS::CloudFront::KeyValueStore
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:cloudfront:<region>:<account_ID>:key-value-store/<KVS_name>
When resources.type equals AWS::CloudTrail::Channel
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>
When resources.type equals AWS::CodeWhisperer::Customization
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:codewhisperer:<region>:<account_ID>:customization/<customization_ID>
When resources.type equals AWS::CodeWhisperer::Profile
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:codewhisperer:<region>:<account_ID>:profile/<profile_ID>
When resources.type equals AWS::Cognito::IdentityPool
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>
When resources.type
equals AWS::DynamoDB::Stream
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>
When resources.type
equals AWS::EC2::Snapshot
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>
When resources.type
equals AWS::EMRWAL::Workspace
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:emrwal:<region>:<account_ID>:workspace/<workspace_name>
When resources.type
equals AWS::FinSpace::Environment
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>
When resources.type
equals AWS::Glue::Table
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>
When resources.type
equals AWS::GreengrassV2::ComponentVersion
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:greengrass:<region>:<account_ID>:components/<component_name>
When resources.type
equals AWS::GreengrassV2::Deployment
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:greengrass:<region>:<account_ID>:deployments/<deployment_ID>
When resources.type
equals AWS::GuardDuty::Detector
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>
When resources.type
equals AWS::IoT::Certificate
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iot:<region>:<account_ID>:cert/<certificate_ID>
When resources.type
equals AWS::IoT::Thing
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iot:<region>:<account_ID>:thing/<thing_ID>
When resources.type
equals AWS::IoTSiteWise::Asset
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iotsitewise:<region>:<account_ID>:asset/<asset_ID>
When resources.type
equals AWS::IoTSiteWise::TimeSeries
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iotsitewise:<region>:<account_ID>:timeseries/<timeseries_ID>
When resources.type
equals AWS::IoTTwinMaker::Entity
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iottwinmaker:<region>:<account_ID>:workspace/<workspace_ID>/entity/<entity_ID>
When resources.type
equals AWS::IoTTwinMaker::Workspace
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:iottwinmaker:<region>:<account_ID>:workspace/<workspace_ID>
When resources.type
equals AWS::KendraRanking::ExecutionPlan
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>
When resources.type
equals AWS::KinesisVideo::Stream
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:kinesisvideo:<region>:<account_ID>:stream/<stream_name>/<creation_time>
When resources.type
equals AWS::ManagedBlockchain::Network
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:managedblockchain:::networks/<network_name>
When resources.type
equals AWS::ManagedBlockchain::Node
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>
When resources.type
equals AWS::MedicalImaging::Datastore
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:medical-imaging:<region>:<account_ID>:datastore/<data_store_ID>
When resources.type
equals AWS::NeptuneGraph::Graph
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:neptune-graph:<region>:<account_ID>:graph/<graph_ID>
When resources.type
equals AWS::PCAConnectorAD::Connector
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:pca-connector-ad:<region>:<account_ID>:connector/<connector_ID>
When resources.type
equals AWS::QApps:QApp
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qapps:<region>:<account_ID>:application/<application_UUID>/qapp/<qapp_UUID>
When resources.type
equals AWS::QBusiness::Application
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>
When resources.type
equals AWS::QBusiness::DataSource
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/index/<index_ID>/data-source/<datasource_ID>
When resources.type
equals AWS::QBusiness::Index
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/index/<index_ID>
When resources.type
equals AWS::QBusiness::WebExperience
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:qbusiness:<region>:<account_ID>:application/<application_ID>/web-experience/<web_experience_ID>
When resources.type
equals AWS::RDS::DBCluster
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:rds:<region>:<account_ID>:cluster/<cluster_name>
When resources.type
equals AWS::S3::AccessPoint
, and the operator is set to Equals
or NotEquals
, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith
or NotStartsWith
operators.
arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>
arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>
When resources.type
equals AWS::S3ObjectLambda::AccessPoint
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>
When resources.type
equals AWS::S3Outposts::Object
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>
When resources.type
equals AWS::SageMaker::Endpoint
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sagemaker:<region>:<account_ID>:endpoint/<endpoint_name>
When resources.type
equals AWS::SageMaker::ExperimentTrialComponent
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>
When resources.type
equals AWS::SageMaker::FeatureGroup
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>
When resources.type
equals AWS::SCN::Instance
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:scn:<region>:<account_ID>:instance/<instance_ID>
When resources.type
equals AWS::ServiceDiscovery::Namespace
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:servicediscovery:<region>:<account_ID>:namespace/<namespace_ID>
When resources.type
equals AWS::ServiceDiscovery::Service
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:servicediscovery:<region>:<account_ID>:service/<service_ID>
When resources.type
equals AWS::SNS::PlatformEndpoint
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sns:<region>:<account_ID>:endpoint/<endpoint_type>/<endpoint_name>/<endpoint_ID>
When resources.type
equals AWS::SNS::Topic
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sns:<region>:<account_ID>:<topic_name>
When resources.type
equals AWS::SQS::Queue
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:sqs:<region>:<account_ID>:<queue_name>
When resources.type
equals AWS::SSM::ManagedNode
, and the operator is set to Equals
or NotEquals
, the ARN must be in one of the following formats:
arn:<partition>:ssm:<region>:<account_ID>:managed-instance/<instance_ID>
arn:<partition>:ec2:<region>:<account_ID>:instance/<instance_ID>
When resources.type
equals AWS::SSMMessages::ControlChannel
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:ssmmessages:<region>:<account_ID>:control-channel/<channel_ID>
When resources.type
equals AWS::SWF::Domain
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:swf:<region>:<account_ID>:domain/<domain_name>
When resources.type
equals AWS::ThinClient::Device
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:thinclient:<region>:<account_ID>:device/<device_ID>
When resources.type
equals AWS::ThinClient::Environment
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:thinclient:<region>:<account_ID>:environment/<environment_ID>
When resources.type
equals AWS::Timestream::Database
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:timestream:<region>:<account_ID>:database/<database_name>
When resources.type
equals AWS::Timestream::Table
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:timestream:<region>:<account_ID>:database/<database_name>/table/<table_name>
When resources.type equals AWS::VerifiedPermissions::PolicyStore
, and the operator is set to Equals
or NotEquals
, the ARN must be in the following format:
arn:<partition>:verifiedpermissions:<region>:<account_ID>:policy-store/<policy_store_UUID>
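Taken together, a minimal boto3 sketch of one advanced event selector that logs data events for a single S3 bucket might look like the following; the event data store name, bucket ARN, and account details are hypothetical placeholders, not values from this model.

import boto3

cloudtrail = boto3.client("cloudtrail")

# Create an event data store that logs S3 object-level data events for one bucket.
response = cloudtrail.create_event_data_store(
    Name="example-store",  # hypothetical name
    AdvancedEventSelectors=[
        {
            "Name": "Log S3 object events for one bucket",
            "FieldSelectors": [
                # eventCategory is required and must use Equals.
                {"Field": "eventCategory", "Equals": ["Data"]},
                # resources.type is required for data events and only supports Equals.
                {"Field": "resources.type", "Equals": ["AWS::S3::Object"]},
                # The trailing slash is intentional; StartsWith matches every object in the bucket.
                {"Field": "resources.ARN", "StartsWith": ["arn:aws:s3:::amzn-example-bucket/"]},
            ],
        }
    ],
)
print(response["EventDataStoreArn"])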
This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see Enabling Trusted Access with Other Amazon Web Services Services and Prepare For Creating a Trail For Your Organization.
", + "documentation":"This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see How to enable or disable trusted access in the Organizations User Guide and Prepare For Creating a Trail For Your Organization in the CloudTrail User Guide.
", "exception":true }, "CloudTrailInvalidClientTokenIdException":{ @@ -1475,7 +1476,7 @@ }, "AdvancedEventSelectors":{ "shape":"AdvancedEventSelectors", - "documentation":"The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.
For more information about how to use advanced event selectors to log CloudTrail events, see Log events by using advanced event selectors in the CloudTrail User Guide.
For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide.
For more information about how to use advanced event selectors to include non-Amazon Web Services events in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide.
" + "documentation":"The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.
For more information about how to use advanced event selectors to log CloudTrail events, see Log events by using advanced event selectors in the CloudTrail User Guide.
For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide.
For more information about how to use advanced event selectors to include events outside of Amazon Web Services events in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide.
" }, "MultiRegionEnabled":{ "shape":"Boolean", @@ -1575,11 +1576,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.
" + "documentation":"Specifies the name of the Amazon S3 bucket designated for publishing log files. For information about bucket naming rules, see Bucket naming rules in the Amazon Simple Storage Service User Guide.
" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
" + "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
" }, "SnsTopicName":{ "shape":"String", @@ -1630,7 +1631,7 @@ }, "S3KeyPrefix":{ "shape":"String", - "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.
" + "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.
" }, "SnsTopicName":{ "shape":"String", @@ -1685,10 +1686,10 @@ }, "Values":{ "shape":"DataResourceValues", - "documentation":"An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified objects.
To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3
.
This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account.
To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/
. The trail logs data events for all objects in this S3 bucket.
To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images
. The trail logs data events for objects in this S3 bucket that match the prefix.
To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda
.
This also enables logging of Invoke
activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account.
To log data events for a specific Lambda function, specify the function ARN.
Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2.
To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb
.
An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.
To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3
.
This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account.
To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/
. The trail logs data events for all objects in this S3 bucket.
To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images
. The trail logs data events for objects in this S3 bucket that match the prefix.
To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda
.
This also enables logging of Invoke
activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account.
To log data events for a specific Lambda function, specify the function ARN.
Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2.
To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb
.
The Amazon S3 buckets, Lambda functions, or Amazon DynamoDB tables that you specify in your event selectors for your trail to log data events. Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.
The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors for the trail.
If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500.
The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1
. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read
and Write
data events.
A user uploads an image file to bucket-1
.
The PutObject
API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.
A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2
.
The PutObject
API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.
The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named MyLambdaFunction, but not for all Lambda functions.
A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.
The Invoke
API operation on MyLambdaFunction is a Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.
The Invoke
API operation on MyOtherLambdaFunction is a Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke
operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.
Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.
Configure the DataResource
to specify the resource type and resource ARNs for which you want to log data events.
You can specify the following resource types in your event selectors for your trail:
AWS::DynamoDB::Table
AWS::Lambda::Function
AWS::S3::Object
The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors for the trail.
If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500.
The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1
. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read
and Write
data events.
A user uploads an image file to bucket-1
.
The PutObject
API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.
A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2
.
The PutObject
API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.
The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named MyLambdaFunction, but not for all Lambda functions.
A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.
The Invoke
API operation on MyLambdaFunction is a Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.
The Invoke
API operation on MyOtherLambdaFunction is a Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke
operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.
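As a companion to the walkthrough above, a hedged boto3 sketch of a basic event selector that mirrors the S3 and Lambda scenarios; the trail name is hypothetical, and the bucket and function ARNs reuse the examples from the text.

import boto3

cloudtrail = boto3.client("cloudtrail")

cloudtrail.put_event_selectors(
    TrailName="example-trail",  # hypothetical trail
    EventSelectors=[
        {
            "ReadWriteType": "All",  # log both Read and Write data events
            "IncludeManagementEvents": True,
            "DataResources": [
                # Empty object prefix (trailing slash): all objects in this bucket are logged.
                {"Type": "AWS::S3::Object", "Values": ["arn:aws:s3:::bucket-1/"]},
                # Exact function ARN: only invocations of this function are logged.
                {
                    "Type": "AWS::Lambda::Function",
                    "Values": ["arn:aws:lambda:us-west-2:111111111111:function:helloworld"],
                },
            ],
        }
    ],
)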
A storage lake of event data against which you can run complex SQL-based queries. An event data store can include events that you have logged on your account. To select events for an event data store, use advanced event selectors.
" + "documentation":"A storage lake of event data against which you can run complex SQL-based queries. An event data store can include events that you have logged on your account. To select events for an event data store, use advanced event selectors.
" }, "EventDataStoreARNInvalidException":{ "type":"structure", @@ -2336,6 +2337,10 @@ "FederationRoleArn":{ "shape":"FederationRoleArn", "documentation":"If Lake query federation is enabled, provides the ARN of the federation role used to access the resources for the federated event data store.
" + }, + "PartitionKeys":{ + "shape":"PartitionKeyList", + "documentation":"The partition keys for the event data store. To improve query performance and efficiency, CloudTrail Lake organizes event data into partitions based on values derived from partition keys.
" } } }, @@ -2559,7 +2564,7 @@ }, "LatestDeliveryError":{ "shape":"String", - "documentation":"Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.
This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, create a new bucket, and then call UpdateTrail
to specify the new bucket; or fix the existing objects so that CloudTrail can again write to the bucket.
Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.
This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, fix the bucket policy so that CloudTrail can write to the bucket; or create a new bucket and call UpdateTrail
to specify the new bucket.
Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.
This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, create a new bucket, and then call UpdateTrail
to specify the new bucket; or fix the existing objects so that CloudTrail can again write to the bucket.
Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.
This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, fix the bucket policy so that CloudTrail can write to the bucket; or create a new bucket and call UpdateTrail
to specify the new bucket.
This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Create an event data store.
", + "documentation":"This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Organization event data stores.
", "exception":true }, "OperationNotPermittedException":{ @@ -3605,6 +3610,41 @@ "min":4, "pattern":".*" }, + "PartitionKey":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"PartitionKeyName", + "documentation":"The name of the partition key.
" + }, + "Type":{ + "shape":"PartitionKeyType", + "documentation":"The data type of the partition key. For example, bigint
or string
.
Contains information about a partition key for an event data store.
" + }, + "PartitionKeyList":{ + "type":"list", + "member":{"shape":"PartitionKey"}, + "max":2 + }, + "PartitionKeyName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "PartitionKeyType":{ + "type":"string", + "max":255, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "PublicKey":{ "type":"structure", "members":{ @@ -4395,11 +4435,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket Naming Requirements.
" + "documentation":"Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket naming rules.
" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
" + "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
" }, "SnsTopicName":{ "shape":"String", @@ -4673,11 +4713,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.
" + "documentation":"Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket naming rules.
" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
" + "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
" }, "SnsTopicName":{ "shape":"String", @@ -4727,7 +4767,7 @@ }, "S3KeyPrefix":{ "shape":"String", - "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your IAM Log Files.
" + "documentation":"Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your IAM Log Files.
" }, "SnsTopicName":{ "shape":"String", diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index a744c84144..980943f613 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -5216,6 +5216,10 @@ } } }, + "AssociatedQueueIdList":{ + "type":"list", + "member":{"shape":"QueueId"} + }, "AssociationId":{ "type":"string", "max":100, @@ -17513,6 +17517,10 @@ "IsDefault":{ "shape":"Boolean", "documentation":"Whether this a default routing profile.
" + }, + "AssociatedQueueIds":{ + "shape":"AssociatedQueueIdList", + "documentation":"The IDs of the associated queue.
" } }, "documentation":"Contains information about a routing profile.
" @@ -17658,7 +17666,7 @@ }, "StringCondition":{ "shape":"StringCondition", - "documentation":"A leaf node condition which can be used to specify a string condition.
The currently supported values for FieldName
are name
, description
, and resourceID
.
A leaf node condition which can be used to specify a string condition.
The currently supported values for FieldName
are associatedQueueIds
, name
, description
, and resourceID
.
The search criteria to be used to return routing profiles.
The name
and description
fields support \"contains\" queries with a minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths outside of this range will throw invalid results.
Lists applications based on a set of parameters.
" }, + "ListJobRunAttempts":{ + "name":"ListJobRunAttempts", + "http":{ + "method":"GET", + "requestUri":"/applications/{applicationId}/jobruns/{jobRunId}/attempts", + "responseCode":200 + }, + "input":{"shape":"ListJobRunAttemptsRequest"}, + "output":{"shape":"ListJobRunAttemptsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists all attempt of a job run.
" + }, "ListJobRuns":{ "name":"ListJobRuns", "http":{ @@ -460,6 +476,11 @@ "X86_64" ] }, + "AttemptNumber":{ + "type":"integer", + "box":true, + "min":1 + }, "AutoStartConfig":{ "type":"structure", "members":{ @@ -837,6 +858,12 @@ "documentation":"The ID of the job run.
", "location":"uri", "locationName":"jobRunId" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"An optimal parameter that indicates the amount of attempts for the job. If not specified, this value defaults to the attempt of the latest job.
", + "location":"querystring", + "locationName":"attempt" } } }, @@ -867,6 +894,12 @@ "documentation":"The ID of the job run.
", "location":"uri", "locationName":"jobRunId" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"An optimal parameter that indicates the amount of attempts for the job. If not specified, this value defaults to the attempt of the latest job.
", + "location":"querystring", + "locationName":"attempt" } } }, @@ -1115,16 +1148,126 @@ "billedResourceUtilization":{ "shape":"ResourceUtilization", "documentation":"The aggregate vCPU, memory, and storage that Amazon Web Services has billed for the job run. The billed resources include a 1-minute minimum usage for workers, plus additional storage over 20 GB per worker. Note that billed resources do not include usage for idle pre-initialized workers.
" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"The mode of the job run.
" + }, + "retryPolicy":{ + "shape":"RetryPolicy", + "documentation":"The retry policy of the job run.
" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"The attempt of the job run.
" + }, + "attemptCreatedAt":{ + "shape":"Date", + "documentation":"The date and time of when the job run attempt was created.
" + }, + "attemptUpdatedAt":{ + "shape":"Date", + "documentation":"The date and time of when the job run attempt was last updated.
" } }, "documentation":"Information about a job run. A job run is a unit of work, such as a Spark JAR, Hive query, or SparkSQL query, that you submit to an Amazon EMR Serverless application.
" }, + "JobRunAttemptSummary":{ + "type":"structure", + "required":[ + "applicationId", + "id", + "arn", + "createdBy", + "jobCreatedAt", + "createdAt", + "updatedAt", + "executionRole", + "state", + "stateDetails", + "releaseLabel" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"The ID of the application the job is running on.
" + }, + "id":{ + "shape":"JobRunId", + "documentation":"The ID of the job run attempt.
" + }, + "name":{ + "shape":"String256", + "documentation":"The name of the job run attempt.
" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"The mode of the job run attempt.
" + }, + "arn":{ + "shape":"JobArn", + "documentation":"The Amazon Resource Name (ARN) of the job run.
" + }, + "createdBy":{ + "shape":"RequestIdentityUserArn", + "documentation":"The user who created the job run.
" + }, + "jobCreatedAt":{ + "shape":"Date", + "documentation":"The date and time of when the job run was created.
" + }, + "createdAt":{ + "shape":"Date", + "documentation":"The date and time when the job run attempt was created.
" + }, + "updatedAt":{ + "shape":"Date", + "documentation":"The date and time of when the job run attempt was last updated.
" + }, + "executionRole":{ + "shape":"IAMRoleArn", + "documentation":"The Amazon Resource Name (ARN) of the execution role of the job run..
" + }, + "state":{ + "shape":"JobRunState", + "documentation":"The state of the job run attempt.
" + }, + "stateDetails":{ + "shape":"String256", + "documentation":"The state details of the job run attempt.
" + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"The Amazon EMR release label of the job run attempt.
" + }, + "type":{ + "shape":"JobRunType", + "documentation":"The type of the job run, such as Spark or Hive.
" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"The attempt number of the job run execution.
" + } + }, + "documentation":"The summary of attributes associated with a job run attempt.
" + }, + "JobRunAttempts":{ + "type":"list", + "member":{"shape":"JobRunAttemptSummary"} + }, "JobRunId":{ "type":"string", "max":64, "min":1, "pattern":"[0-9a-z]+" }, + "JobRunMode":{ + "type":"string", + "enum":[ + "BATCH", + "STREAMING" + ] + }, "JobRunState":{ "type":"string", "enum":[ @@ -1171,6 +1314,10 @@ "shape":"String256", "documentation":"The optional job run name. This doesn't have to be unique.
" }, + "mode":{ + "shape":"JobRunMode", + "documentation":"The mode of the job run.
" + }, "arn":{ "shape":"JobArn", "documentation":"The ARN of the job run.
" @@ -1206,6 +1353,18 @@ "type":{ "shape":"JobRunType", "documentation":"The type of job run, such as Spark or Hive.
" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"The attempt number of the job run execution.
" + }, + "attemptCreatedAt":{ + "shape":"Date", + "documentation":"The date and time of when the job run attempt was created.
" + }, + "attemptUpdatedAt":{ + "shape":"Date", + "documentation":"The date and time of when the job run attempt was last updated.
" } }, "documentation":"The summary of attributes associated with a job run.
" @@ -1258,6 +1417,59 @@ } } }, + "ListJobRunAttemptsRequest":{ + "type":"structure", + "required":[ + "applicationId", + "jobRunId" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"The ID of the application for which to list job runs.
", + "location":"uri", + "locationName":"applicationId" + }, + "jobRunId":{ + "shape":"JobRunId", + "documentation":"The ID of the job run to list.
", + "location":"uri", + "locationName":"jobRunId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The token for the next set of job run attempt results.
", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ListJobRunAttemptsRequestMaxResultsInteger", + "documentation":"The maximum number of job run attempts to list.
", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListJobRunAttemptsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, + "ListJobRunAttemptsResponse":{ + "type":"structure", + "required":["jobRunAttempts"], + "members":{ + "jobRunAttempts":{ + "shape":"JobRunAttempts", + "documentation":"The array of the listed job run attempt objects.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The output displays the token for the next set of application results. This is required for pagination and is available as a response of the previous request.
" + } + } + }, "ListJobRunsRequest":{ "type":"structure", "required":["applicationId"], @@ -1297,6 +1509,12 @@ "documentation":"An optional filter for job run states. Note that if this filter contains multiple states, the resulting list will be grouped by the state.
", "location":"querystring", "locationName":"states" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"The mode of the job runs to list.
", + "location":"querystring", + "locationName":"mode" } } }, @@ -1529,6 +1747,25 @@ }, "documentation":"The resource utilization for memory, storage, and vCPU for jobs.
" }, + "RetryPolicy":{ + "type":"structure", + "members":{ + "maxAttempts":{ + "shape":"AttemptNumber", + "documentation":"Maximum number of attempts for the job run. This parameter is only applicable for BATCH
mode.
Maximum number of failed attempts per hour. This [arameter is only applicable for STREAMING
mode.
The retry policy to use for a job run.
" + }, + "RetryPolicyMaxFailedAttemptsPerHourInteger":{ + "type":"integer", + "box":true, + "min":1 + }, "S3MonitoringConfiguration":{ "type":"structure", "members":{ @@ -1662,6 +1899,14 @@ "name":{ "shape":"String256", "documentation":"The optional job run name. This doesn't have to be unique.
" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"The mode of the job run when it starts.
" + }, + "retryPolicy":{ + "shape":"RetryPolicy", + "documentation":"The retry policy when job run starts.
" } } }, diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index ef12d953bb..c28791443d 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -78,7 +78,7 @@ {"shape":"TenantDatabaseNotFoundFault"}, {"shape":"DBSnapshotTenantDatabaseNotFoundFault"} ], - "documentation":"Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.
For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.
" + "documentation":"Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.
For an overview on tagging your relational database resources, see Tagging Amazon RDS Resources or Tagging Amazon Aurora and Amazon RDS Resources.
" }, "ApplyPendingMaintenanceAction":{ "name":"ApplyPendingMaintenanceAction", @@ -4127,7 +4127,7 @@ }, "DatabaseName":{ "shape":"String", - "documentation":"The name for your database of up to 64 alphanumeric characters. If you don't provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating.
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
" + "documentation":"The name for your database of up to 64 alphanumeric characters. A database named postgres
is always created. If this parameter is specified, an additional database with this name is created.
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
" }, "DBClusterIdentifier":{ "shape":"String", @@ -4406,7 +4406,7 @@ "members":{ "DBName":{ "shape":"String", - "documentation":"The meaning of this parameter differs according to the database engine you use.
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.
Constraints:
Must contain 1 to 64 alphanumeric characters.
Can't be a word reserved by the database engine.
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres
is created in the DB cluster.
Constraints:
It must contain 1 to 63 alphanumeric characters.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL
for non-CDBs and RDSCDB
for CDBs.
Default: ORCL
Constraints:
Must contain 1 to 8 alphanumeric characters.
Must contain a letter.
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine.
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine.
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine.
The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL
. You can't specify the string null
, or any other reserved word, for DBName
.
Default: ORCL
Constraints:
Can't be longer than 8 characters.
The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres
is created in the DB instance.
Constraints:
Must contain 1 to 63 letters, numbers, or underscores.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine.
Not applicable. Must be null.
The meaning of this parameter differs according to the database engine you use.
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.
Constraints:
Must contain 1 to 64 alphanumeric characters.
Can't be a word reserved by the database engine.
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. A database named postgres
is always created. If this parameter is specified, an additional database with this name is created.
Constraints:
It must contain 1 to 63 alphanumeric characters.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL
for non-CDBs and RDSCDB
for CDBs.
Default: ORCL
Constraints:
Must contain 1 to 8 alphanumeric characters.
Must contain a letter.
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine.
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine.
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine.
The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL
. You can't specify the string null
, or any other reserved word, for DBName
.
Default: ORCL
Constraints:
Can't be longer than 8 characters.
The name of the database to create when the DB instance is created. A database named postgres
is always created. If this parameter is specified, an additional database with this name is created.
Constraints:
Must contain 1 to 63 letters, numbers, or underscores.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine.
Not applicable. Must be null.
An Amazon Web Services Identity and Access Management (IAM) role to allow Amazon RDS to access your Amazon S3 bucket.
" + "documentation":"An Amazon Web Services Identity and Access Management (IAM) role with a trust policy and a permissions policy that allows Amazon RDS to access your Amazon S3 bucket. For information about this role, see Creating an IAM role manually in the Amazon RDS User Guide.
" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -16568,7 +16568,7 @@ "documentation":"A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with aws:
or rds:
. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").
Metadata assigned to an Amazon RDS resource consisting of a key-value pair.
For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.
" + "documentation":"Metadata assigned to an Amazon RDS resource consisting of a key-value pair.
For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide.
" }, "TagList":{ "type":"list", diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index fc335692ad..c8c99b2bb3 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -1920,7 +1920,7 @@ }, "input":{"shape":"DescribeModelPackageInput"}, "output":{"shape":"DescribeModelPackageOutput"}, - "documentation":"Returns a description of the specified model package, which is used to create SageMaker models or list them on Amazon Web Services Marketplace.
To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace.
" + "documentation":"Returns a description of the specified model package, which is used to create SageMaker models or list them on Amazon Web Services Marketplace.
If you provided a KMS Key ID when you created your model package, you will see the KMS Decrypt API call in your CloudTrail logs when you use this API.
To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace.
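Since the new documentation notes that DescribeModelPackage triggers a KMS Decrypt for packages created with a KMS Key ID, callers will also need kms:Decrypt permission for that key. A hedged sketch; the package name is a placeholder.

import boto3

sm = boto3.client("sagemaker")

# If the package was created with SecurityConfig.KmsKeyId, this call also
# performs a KMS Decrypt, which appears in CloudTrail logs.
pkg = sm.describe_model_package(ModelPackageName="example-model-package")
print(pkg["ModelPackageStatus"])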
" }, "DescribeModelPackageGroup":{ "name":"DescribeModelPackageGroup", @@ -5121,7 +5121,13 @@ "randomforest", "extra-trees", "nn-torch", - "fastai" + "fastai", + "cnn-qr", + "deepar", + "prophet", + "npts", + "arima", + "ets" ] }, "AutoMLAlgorithmConfig":{ @@ -5130,10 +5136,10 @@ "members":{ "AutoMLAlgorithms":{ "shape":"AutoMLAlgorithms", - "documentation":"The selection of algorithms run on a dataset to train the model candidates of an Autopilot job.
Selected algorithms must belong to the list corresponding to the training mode set in AutoMLJobConfig.Mode (ENSEMBLING or HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm. In ENSEMBLING mode: \"catboost\", \"extra-trees\", \"fastai\", \"lightgbm\", \"linear-learner\", \"nn-torch\", \"randomforest\", \"xgboost\". In HYPERPARAMETER_TUNING mode: \"linear-learner\", \"mlp\", \"xgboost\".
" + "documentation":"The selection of algorithms trained on your dataset to generate the model candidates for an Autopilot job. For the tabular problem type TabularJobConfig: Selected algorithms must belong to the list corresponding to the training mode set in AutoMLJobConfig.Mode (ENSEMBLING or HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm. In ENSEMBLING mode: \"catboost\", \"extra-trees\", \"fastai\", \"lightgbm\", \"linear-learner\", \"nn-torch\", \"randomforest\", \"xgboost\". In HYPERPARAMETER_TUNING mode: \"linear-learner\", \"mlp\", \"xgboost\". For the time-series forecasting problem type TimeSeriesForecastingJobConfig: Choose your algorithms from this list: \"cnn-qr\", \"deepar\", \"prophet\", \"arima\", \"npts\", \"ets\".
The collection of algorithms run on a dataset for training the model candidates of an Autopilot job.
" + "documentation":"The selection of algorithms trained on your dataset to generate the model candidates for an Autopilot job.
" }, "AutoMLAlgorithms":{ "type":"list", @@ -5213,7 +5219,7 @@ }, "AlgorithmsConfig":{ "shape":"AutoMLAlgorithmsConfig", - "documentation":"Stores the configuration information for the selection of algorithms used to train the model candidates.
The list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode. AlgorithmsConfig should not be set in AUTO training mode. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, AutoMLCandidateGenerationConfig uses the full set of algorithms for the given training mode. When AlgorithmsConfig is not provided, AutoMLCandidateGenerationConfig uses the full set of algorithms for the given training mode. For the list of all algorithms per training mode, see AutoMLAlgorithmConfig. For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.
" + "documentation":"Stores the configuration information for the selection of algorithms trained on tabular data.
The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode
.
AlgorithmsConfig
should not be set if the training mode is set on AUTO
.
When AlgorithmsConfig
is provided, one AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for AutoMLAlgorithms
is empty, CandidateGenerationConfig
uses the full set of algorithms for the given training mode.
When AlgorithmsConfig
is not provided, CandidateGenerationConfig
uses the full set of algorithms for the given training mode.
For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig.
For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.
" } }, "documentation":"Stores the configuration information for how a candidate is generated (optional).
" @@ -6181,7 +6187,7 @@ "members":{ "AlgorithmsConfig":{ "shape":"AutoMLAlgorithmsConfig", - "documentation":"Stores the configuration information for the selection of algorithms used to train model candidates on tabular data.
The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode. AlgorithmsConfig should not be set in AUTO training mode. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode. When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode. For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig. For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.
" + "documentation":"Your Autopilot job trains a default set of algorithms on your dataset. For tabular and time-series data, you can customize the algorithm list by selecting a subset of algorithms for your problem type.
AlgorithmsConfig
stores the customized selection of algorithms to train on your data.
For the tabular problem type TabularJobConfig
, the list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode
.
AlgorithmsConfig
should not be set when the training mode AutoMLJobConfig.Mode
is set to AUTO
.
When AlgorithmsConfig
is provided, one AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for AutoMLAlgorithms
is empty, CandidateGenerationConfig
uses the full set of algorithms for the given training mode.
When AlgorithmsConfig
is not provided, CandidateGenerationConfig
uses the full set of algorithms for the given training mode.
For the list of all algorithms per training mode, see AlgorithmConfig.
For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.
For the time-series forecasting problem type TimeSeriesForecastingJobConfig
, choose your algorithms from the list provided in AlgorithmConfig.
For more information on each algorithm, see the Algorithms support for time-series forecasting section in the Autopilot developer guide.
When AlgorithmsConfig
is provided, one AutoMLAlgorithms
attribute must be set and one only.
If the list of algorithms provided as values for AutoMLAlgorithms
is empty, CandidateGenerationConfig
uses the full set of algorithms for time-series forecasting.
When AlgorithmsConfig
is not provided, CandidateGenerationConfig
uses the full set of algorithms for time-series forecasting.
Stores the configuration information for how model candidates are generated using an AutoML job V2.
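Putting the pieces together, the CandidateGenerationConfig member added to TimeSeriesForecastingJobConfig later in this diff plugs into CreateAutoMLJobV2 roughly as in the sketch below; the job name, S3 paths, column names, and role ARN are placeholders.

import boto3

sm = boto3.client("sagemaker")

sm.create_auto_ml_job_v2(
    AutoMLJobName="example-forecast-job",  # placeholder
    AutoMLJobInputDataConfig=[{
        "ChannelType": "training",
        "ContentType": "text/csv;header=present",
        "DataSource": {"S3DataSource": {
            "S3DataType": "S3Prefix",
            "S3Uri": "s3://example-bucket/forecast/",  # placeholder
        }},
    }],
    OutputDataConfig={"S3OutputPath": "s3://example-bucket/output/"},
    AutoMLProblemTypeConfig={"TimeSeriesForecastingJobConfig": {
        "ForecastFrequency": "D",
        "ForecastHorizon": 14,
        "TimeSeriesConfig": {
            "TargetAttributeName": "demand",       # placeholder column names
            "TimestampAttributeName": "ts",
            "ItemIdentifierAttributeName": "item_id",
        },
        # New in this release: restrict candidate generation to a subset of
        # the time-series algorithms instead of the full default set.
        "CandidateGenerationConfig": {
            "AlgorithmsConfig": [{"AutoMLAlgorithms": ["deepar", "npts"]}]
        },
    }},
    RoleArn="arn:aws:iam::123456789012:role/ExampleSageMakerRole",  # placeholder
)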
" @@ -9647,6 +9653,14 @@ "SourceUri":{ "shape":"ModelPackageSourceUri", "documentation":"The URI of the source for the model package. If you want to clone a model package, set it to the model package Amazon Resource Name (ARN). If you want to register a model, set it to the model ARN.
" + }, + "SecurityConfig":{ + "shape":"ModelPackageSecurityConfig", + "documentation":"The KMS Key ID (KMSKeyId
) used for encryption of model package information.
The model card associated with the model package. Since ModelPackageModelCard
is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard
. The ModelPackageModelCard
schema does not include model_package_details
, and model_overview
is composed of the model_creator
and model_artifact
properties. For more information about the model card associated with the model package, see View the Details of a Model Version.
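A sketch of how the two new CreateModelPackage members might combine; the group name, ARNs, and card content are placeholders, and whether they validate alongside your other inputs depends on the operation, so treat this as a shape illustration only.

import json

import boto3

sm = boto3.client("sagemaker")

sm.create_model_package(
    ModelPackageGroupName="example-group",  # placeholder
    SourceUri="arn:aws:sagemaker:us-east-1:123456789012:model-package/example-group/1",
    # New in this release: encrypt model package information with a KMS key.
    SecurityConfig={"KmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555"},
    # New in this release: attach a simplified model card to the package.
    ModelCard={
        "ModelCardContent": json.dumps({"model_overview": {"model_creator": "example-team"}}),
        "ModelCardStatus": "Draft",
    },
)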
The URI of the source for the model package.
" + }, + "SecurityConfig":{ + "shape":"ModelPackageSecurityConfig", + "documentation":"The KMS Key ID (KMSKeyId
) used for encryption of model package information.
The model card associated with the model package. Since ModelPackageModelCard
is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard
. The ModelPackageModelCard
schema does not include model_package_details
, and model_overview
is composed of the model_creator
and model_artifact
properties. For more information about the model card associated with the model package, see View the Details of a Model Version.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.
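The MaxResults/NextToken contract described above is the standard token-pagination loop. A sketch using ListModelPackages; any of the operations carrying this documentation works the same way.

import boto3

sm = boto3.client("sagemaker")

# MaxResults caps each page (default 10); a NextToken in the response means
# more results remain and must be passed back on the next call.
kwargs = {"MaxResults": 10}
while True:
    page = sm.list_model_packages(**kwargs)
    for summary in page["ModelPackageSummaryList"]:
        print(summary["ModelPackageArn"])
    token = page.get("NextToken")
    if not token:
        break
    kwargs["NextToken"] = token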
The URI of the source for the model package.
" }, + "SecurityConfig":{"shape":"ModelPackageSecurityConfig"}, + "ModelCard":{"shape":"ModelPackageModelCard"}, "Tags":{ "shape":"TagList", "documentation":"A list of the tags associated with the model package. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.
" @@ -27234,6 +27258,31 @@ "type":"list", "member":{"shape":"ModelPackageGroupSummary"} }, + "ModelPackageModelCard":{ + "type":"structure", + "members":{ + "ModelCardContent":{ + "shape":"ModelCardContent", + "documentation":"The content of the model card.
" + }, + "ModelCardStatus":{ + "shape":"ModelCardStatus", + "documentation":"The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval.
Draft: The model card is a work in progress. PendingReview: The model card is pending review. Approved: The model card is approved. Archived: The model card is archived. No more updates can be made to the model card content. If you try to update the model card content, you will receive the message Model Card is in Archived state.
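The four statuses above form a simple lifecycle (Draft, PendingReview, Approved, Archived). A sketch of a ModelPackageModelCard payload moving through it; the card content is a placeholder.

import json

# Once Archived, content edits are rejected with "Model Card is in Archived state".
model_card = {
    "ModelCardContent": json.dumps({"model_overview": {"model_creator": "example-team"}}),
    "ModelCardStatus": "PendingReview",  # then "Approved", then "Archived"
}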
The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model card associated with the model package, see View the Details of a Model Version.
The KMS Key ID (KMSKeyId) used for encryption of model package information.
An optional Key Management Service key to encrypt, decrypt, and re-encrypt model package information for regulated workloads with highly sensitive data.
" + }, "ModelPackageSortBy":{ "type":"string", "enum":[ @@ -34438,7 +34487,8 @@ "HolidayConfig":{ "shape":"HolidayConfig", "documentation":"The collection of holiday featurization attributes used to incorporate national holiday information into your forecasting model.
" - } + }, + "CandidateGenerationConfig":{"shape":"CandidateGenerationConfig"} }, "documentation":"The collection of settings used by an AutoML job V2 for the time-series forecasting problem type.
" }, @@ -36762,6 +36812,10 @@ "SourceUri":{ "shape":"ModelPackageSourceUri", "documentation":"The URI of the source for the model package.
" + }, + "ModelCard":{ + "shape":"ModelPackageModelCard", + "documentation":"The model card associated with the model package. Since ModelPackageModelCard
is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard
. The ModelPackageModelCard
schema does not include model_package_details
, and model_overview
is composed of the model_creator
and model_artifact
properties. For more information about the model card associated with the model package, see View the Details of a Model Version.