From dd723bee4a8be4151b29a195bd6b0f920a6c2f3c Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 4 Dec 2024 19:08:57 +0000 Subject: [PATCH 1/2] Update to latest models --- .../api-change-bedrock-13860.json | 5 + .../api-change-bedrockagent-72999.json | 5 + .../api-change-bedrockagentruntime-2027.json | 5 + ...pi-change-bedrockdataautomation-77172.json | 5 + ...ge-bedrockdataautomationruntime-71717.json | 5 + .../api-change-bedrockruntime-67114.json | 5 + .../next-release/api-change-kendra-74397.json | 5 + .../api-change-sagemaker-9454.json | 5 + .../2023-07-26/service-2.json | 249 +- .../bedrock-agent/2023-06-05/service-2.json | 487 +++- .../2024-06-13/endpoint-rule-set-1.json | 350 +++ .../2024-06-13/paginators-1.json | 3 + .../2024-06-13/service-2.json | 394 +++ .../2023-07-26/endpoint-rule-set-1.json | 350 +++ .../2023-07-26/paginators-1.json | 16 + .../2023-07-26/service-2.json | 1247 ++++++++++ .../bedrock-runtime/2023-09-30/service-2.json | 144 +- .../data/bedrock/2023-04-20/paginators-1.json | 12 + .../data/bedrock/2023-04-20/service-2.json | 803 +++++- .../data/kendra/2019-02-03/service-2.json | 127 +- .../sagemaker/2017-07-24/paginators-1.json | 24 + .../data/sagemaker/2017-07-24/service-2.json | 2145 ++++++++++++++++- .../endpoint-tests-1.json | 314 +++ .../endpoint-tests-1.json | 314 +++ 24 files changed, 6939 insertions(+), 80 deletions(-) create mode 100644 .changes/next-release/api-change-bedrock-13860.json create mode 100644 .changes/next-release/api-change-bedrockagent-72999.json create mode 100644 .changes/next-release/api-change-bedrockagentruntime-2027.json create mode 100644 .changes/next-release/api-change-bedrockdataautomation-77172.json create mode 100644 .changes/next-release/api-change-bedrockdataautomationruntime-71717.json create mode 100644 .changes/next-release/api-change-bedrockruntime-67114.json create mode 100644 .changes/next-release/api-change-kendra-74397.json create mode 100644 .changes/next-release/api-change-sagemaker-9454.json create mode 100644 botocore/data/bedrock-data-automation-runtime/2024-06-13/endpoint-rule-set-1.json create mode 100644 botocore/data/bedrock-data-automation-runtime/2024-06-13/paginators-1.json create mode 100644 botocore/data/bedrock-data-automation-runtime/2024-06-13/service-2.json create mode 100644 botocore/data/bedrock-data-automation/2023-07-26/endpoint-rule-set-1.json create mode 100644 botocore/data/bedrock-data-automation/2023-07-26/paginators-1.json create mode 100644 botocore/data/bedrock-data-automation/2023-07-26/service-2.json create mode 100644 tests/functional/endpoint-rules/bedrock-data-automation-runtime/endpoint-tests-1.json create mode 100644 tests/functional/endpoint-rules/bedrock-data-automation/endpoint-tests-1.json diff --git a/.changes/next-release/api-change-bedrock-13860.json b/.changes/next-release/api-change-bedrock-13860.json new file mode 100644 index 0000000000..df00fbdd57 --- /dev/null +++ b/.changes/next-release/api-change-bedrock-13860.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock``", + "description": "Introduced two APIs ListPromptRouters and GetPromptRouter for Intelligent Prompt Router feature. Add support for Bedrock Guardrails image content filter. New Bedrock Marketplace feature enabling a wider range of bedrock compatible models with self-hosted capability." 
+} diff --git a/.changes/next-release/api-change-bedrockagent-72999.json b/.changes/next-release/api-change-bedrockagent-72999.json new file mode 100644 index 0000000000..2ef20a639f --- /dev/null +++ b/.changes/next-release/api-change-bedrockagent-72999.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-agent``", + "description": "This release introduces the ability to generate SQL using natural language, through a new GenerateQuery API (with native integration into Knowledge Bases); ability to ingest and retrieve images through Bedrock Data Automation; and ability to create a Knowledge Base backed by Kendra GenAI Index." +} diff --git a/.changes/next-release/api-change-bedrockagentruntime-2027.json b/.changes/next-release/api-change-bedrockagentruntime-2027.json new file mode 100644 index 0000000000..6a411146bf --- /dev/null +++ b/.changes/next-release/api-change-bedrockagentruntime-2027.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-agent-runtime``", + "description": "This release introduces the ability to generate SQL using natural language, through a new GenerateQuery API (with native integration into Knowledge Bases); ability to ingest and retrieve images through Bedrock Data Automation; and ability to create a Knowledge Base backed by Kendra GenAI Index." +} diff --git a/.changes/next-release/api-change-bedrockdataautomation-77172.json b/.changes/next-release/api-change-bedrockdataautomation-77172.json new file mode 100644 index 0000000000..5b33934a7a --- /dev/null +++ b/.changes/next-release/api-change-bedrockdataautomation-77172.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-data-automation``", + "description": "Release Bedrock Data Automation SDK" +} diff --git a/.changes/next-release/api-change-bedrockdataautomationruntime-71717.json b/.changes/next-release/api-change-bedrockdataautomationruntime-71717.json new file mode 100644 index 0000000000..5afd2a98ff --- /dev/null +++ b/.changes/next-release/api-change-bedrockdataautomationruntime-71717.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-data-automation-runtime``", + "description": "Release Bedrock Data Automation Runtime SDK" +} diff --git a/.changes/next-release/api-change-bedrockruntime-67114.json b/.changes/next-release/api-change-bedrockruntime-67114.json new file mode 100644 index 0000000000..e1d8878da8 --- /dev/null +++ b/.changes/next-release/api-change-bedrockruntime-67114.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-runtime``", + "description": "Added support for Intelligent Prompt Router in Invoke, InvokeStream, Converse and ConverseStream. Add support for Bedrock Guardrails image content filter. New Bedrock Marketplace feature enabling a wider range of bedrock compatible models with self-hosted capability." +} diff --git a/.changes/next-release/api-change-kendra-74397.json b/.changes/next-release/api-change-kendra-74397.json new file mode 100644 index 0000000000..5ba84fe10e --- /dev/null +++ b/.changes/next-release/api-change-kendra-74397.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``kendra``", + "description": "This release adds GenAI Index in Amazon Kendra for Retrieval Augmented Generation (RAG) and intelligent search. With the Kendra GenAI Index, customers get high retrieval accuracy powered by the latest information retrieval technologies and semantic models." 
+} diff --git a/.changes/next-release/api-change-sagemaker-9454.json b/.changes/next-release/api-change-sagemaker-9454.json new file mode 100644 index 0000000000..494d2670f5 --- /dev/null +++ b/.changes/next-release/api-change-sagemaker-9454.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``sagemaker``", + "description": "Amazon SageMaker HyperPod launched task governance to help customers maximize accelerator utilization for model development and flexible training plans to meet training timelines and budget while reducing weeks of training time. AI apps from AWS partner is now available in SageMaker." +} diff --git a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json index 3c26257353..ae45ead207 100644 --- a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json +++ b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json @@ -37,6 +37,28 @@ "documentation":"

Deletes memory from the specified memory identifier.

", "idempotent":true }, + "GenerateQuery":{ + "name":"GenerateQuery", + "http":{ + "method":"POST", + "requestUri":"/generateQuery", + "responseCode":200 + }, + "input":{"shape":"GenerateQueryRequest"}, + "output":{"shape":"GenerateQueryResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"DependencyFailedException"}, + {"shape":"BadGatewayException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"
Generates an SQL query from a natural language query. For more information, see Generate a query for structured data in the Amazon Bedrock User Guide.
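A minimal sketch of calling this new operation, assuming boto3's usual snake_case mapping (GenerateQuery becomes generate_query on a 'bedrock-agent-runtime' client) and a botocore release that includes this model; the knowledge base ARN is a placeholder:

```python
# Hedged sketch, not the canonical SDK example. Assumes the generate_query method exists
# once this service model ships; the knowledge base ARN below is a placeholder.
import boto3

client = boto3.client("bedrock-agent-runtime", region_name="us-east-1")

response = client.generate_query(
    queryGenerationInput={
        "text": "How many orders shipped last month?",  # the natural language query
        "type": "TEXT",
    },
    transformationConfiguration={
        "mode": "TEXT_TO_SQL",
        "textToSqlConfiguration": {
            "type": "KNOWLEDGE_BASE",
            "knowledgeBaseConfiguration": {
                # Placeholder ARN of a knowledge base connected to a structured data source.
                "knowledgeBaseArn": "arn:aws:bedrock:us-east-1:111122223333:knowledge-base/EXAMPLEKB0"
            },
        },
    },
)

for query in response.get("queries", []):
    print(query["type"], query["sql"])  # e.g. REDSHIFT_SQL and the generated statement
```

The response's queries list holds one GeneratedQuery object per candidate; in this model the only type value is REDSHIFT_SQL.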
" + }, "GetAgentMemory":{ "name":"GetAgentMemory", "http":{ @@ -1737,6 +1759,56 @@ "type":"list", "member":{"shape":"FunctionDefinition"} }, + "GenerateQueryRequest":{ + "type":"structure", + "required":[ + "queryGenerationInput", + "transformationConfiguration" + ], + "members":{ + "queryGenerationInput":{ + "shape":"QueryGenerationInput", + "documentation":"

Specifies information about a natural language query to transform into SQL.

" + }, + "transformationConfiguration":{ + "shape":"TransformationConfiguration", + "documentation":"

Specifies configurations for transforming the natural language query into SQL.

" + } + } + }, + "GenerateQueryResponse":{ + "type":"structure", + "members":{ + "queries":{ + "shape":"GeneratedQueries", + "documentation":"

A list of objects, each of which defines a generated query that can correspond to the natural language queries.

" + } + } + }, + "GeneratedQueries":{ + "type":"list", + "member":{"shape":"GeneratedQuery"}, + "min":0 + }, + "GeneratedQuery":{ + "type":"structure", + "members":{ + "sql":{ + "shape":"String", + "documentation":"

An SQL query that corresponds to the natural language query.

" + }, + "type":{ + "shape":"GeneratedQueryType", + "documentation":"

The type of transformed query.

" + } + }, + "documentation":"

Contains information about a query generated for a natural language query.

", + "sensitive":true + }, + "GeneratedQueryType":{ + "type":"string", + "enum":["REDSHIFT_SQL"] + }, "GeneratedResponsePart":{ "type":"structure", "members":{ @@ -2485,6 +2557,10 @@ "documentation":"

Contains information about the prompt to optimize.

", "union":true }, + "InputQueryType":{ + "type":"string", + "enum":["TEXT"] + }, "InputText":{ "type":"string", "max":25000000, @@ -2838,6 +2914,12 @@ }, "documentation":"

Details of the knowledge base associated with the inline agent.

" }, + "KnowledgeBaseArn":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:[0-9]{12}:knowledge-base/[0-9a-zA-Z]+$" + }, "KnowledgeBaseConfiguration":{ "type":"structure", "required":[ @@ -2929,7 +3011,7 @@ "members":{ "content":{ "shape":"RetrievalResultContent", - "documentation":"

Contains a chunk of text from a data source in the knowledge base.

" + "documentation":"

Contains information about the content of the chunk.

" }, "location":{ "shape":"RetrievalResultLocation", @@ -3807,6 +3889,30 @@ }, "documentation":"

Contains the parameters in the request body.

" }, + "QueryGenerationInput":{ + "type":"structure", + "required":[ + "text", + "type" + ], + "members":{ + "text":{ + "shape":"QueryGenerationInputTextString", + "documentation":"

The text of the query.

" + }, + "type":{ + "shape":"InputQueryType", + "documentation":"

The type of the query.

" + } + }, + "documentation":"

Contains information about a natural language query to transform into SQL.

", + "sensitive":true + }, + "QueryGenerationInputTextString":{ + "type":"string", + "max":20000, + "min":1 + }, "QueryTransformationConfiguration":{ "type":"structure", "required":["type"], @@ -3818,6 +3924,10 @@ }, "documentation":"

To split up the prompt and retrieve multiple sources, set the transformation type to QUERY_DECOMPOSITION.

" }, + "QueryTransformationMode":{ + "type":"string", + "enum":["TEXT_TO_SQL"] + }, "QueryTransformationType":{ "type":"string", "enum":["QUERY_DECOMPOSITION"] @@ -4286,16 +4396,70 @@ }, "RetrievalResultContent":{ "type":"structure", - "required":["text"], "members":{ + "byteContent":{ + "shape":"String", + "documentation":"

A data URI with base64-encoded content from the data source, returned in the following format: data:image/jpeg;base64,${base64-encoded string}.

" + }, + "row":{ + "shape":"RetrievalResultContentRow", + "documentation":"

Specifies information about the rows with the cells to return in retrieval.

" + }, "text":{ "shape":"String", "documentation":"

The cited text from the data source.

" + }, + "type":{ + "shape":"RetrievalResultContentType", + "documentation":"

The type of content in the retrieval result.

" + } + }, + "documentation":"
Contains information about a chunk of text from a data source in the knowledge base. If the result is from a structured data source, the cell in the database and the type of the value are also identified.
This data type is used in the following API operations:
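A sketch of how a caller might read these new content fields from a Retrieve response, assuming boto3's existing retrieve() method; the knowledge base ID and query text are placeholders:

```python
# Hedged sketch: results from a structured (SQL) source carry a "row" of cells rather
# than plain "text". Identifiers below are placeholders.
import boto3

client = boto3.client("bedrock-agent-runtime")

resp = client.retrieve(
    knowledgeBaseId="EXAMPLEKB0",  # placeholder 10-character knowledge base ID
    retrievalQuery={"text": "total sales by region"},
)

for result in resp["retrievalResults"]:
    content = result["content"]
    if content.get("type") == "ROW":
        # Structured results: each cell reports its column name, value, and data type.
        print({cell["columnName"]: cell["columnValue"] for cell in content["row"]})
    else:
        print(content.get("text"))
```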
", + "sensitive":true + }, + "RetrievalResultContentColumn":{ + "type":"structure", + "members":{ + "columnName":{ + "shape":"String", + "documentation":"

The name of the column.

" + }, + "columnValue":{ + "shape":"String", + "documentation":"

The value in the column.

" + }, + "type":{ + "shape":"RetrievalResultContentColumnType", + "documentation":"

The data type of the value.

" } }, - "documentation":"

Contains the cited text from the data source.

This data type is used in the following API operations:

", + "documentation":"

Contains information about a column with a cell to return in retrieval.

", + "sensitive":true + }, + "RetrievalResultContentColumnType":{ + "type":"string", + "enum":[ + "BLOB", + "BOOLEAN", + "DOUBLE", + "NULL", + "LONG", + "STRING" + ] + }, + "RetrievalResultContentRow":{ + "type":"list", + "member":{"shape":"RetrievalResultContentColumn"}, "sensitive":true }, + "RetrievalResultContentType":{ + "type":"string", + "enum":[ + "TEXT", + "IMAGE", + "ROW" + ] + }, "RetrievalResultCustomDocumentLocation":{ "type":"structure", "members":{ @@ -4306,6 +4470,16 @@ }, "documentation":"

Contains information about the location of a document in a custom data source.

" }, + "RetrievalResultKendraDocumentLocation":{ + "type":"structure", + "members":{ + "uri":{ + "shape":"String", + "documentation":"

The document's URI.

" + } + }, + "documentation":"

The location of a result in Amazon Kendra.

" + }, "RetrievalResultLocation":{ "type":"structure", "required":["type"], @@ -4318,6 +4492,10 @@ "shape":"RetrievalResultCustomDocumentLocation", "documentation":"

Specifies the location of a document in a custom data source.

" }, + "kendraDocumentLocation":{ + "shape":"RetrievalResultKendraDocumentLocation", + "documentation":"

The location of a document in Amazon Kendra.

" + }, "s3Location":{ "shape":"RetrievalResultS3Location", "documentation":"

The S3 data source location.

" @@ -4330,6 +4508,10 @@ "shape":"RetrievalResultSharePointLocation", "documentation":"

The SharePoint data source location.

" }, + "sqlLocation":{ + "shape":"RetrievalResultSqlLocation", + "documentation":"

Specifies information about the SQL query used to retrieve the result.

" + }, "type":{ "shape":"RetrievalResultLocationType", "documentation":"

The type of data source location.

" @@ -4339,7 +4521,7 @@ "documentation":"

The web URL/URLs data source location.

" } }, - "documentation":"

Contains information about the data source location.

This data type is used in the following API operations:

", + "documentation":"

Contains information about the data source location.

This data type is used in the following API operations:

", "sensitive":true }, "RetrievalResultLocationType":{ @@ -4350,7 +4532,9 @@ "CONFLUENCE", "SALESFORCE", "SHAREPOINT", - "CUSTOM" + "CUSTOM", + "KENDRA", + "SQL" ] }, "RetrievalResultMetadata":{ @@ -4401,6 +4585,16 @@ }, "documentation":"

The SharePoint data source location.

" }, + "RetrievalResultSqlLocation":{ + "type":"structure", + "members":{ + "query":{ + "shape":"String", + "documentation":"

The SQL query used to retrieve the result.

" + } + }, + "documentation":"

Contains information about the SQL query used to retrieve the result.

" + }, "RetrievalResultWebLocation":{ "type":"structure", "members":{ @@ -5038,6 +5232,36 @@ "documentation":"

Contains the part of the generated text that contains a citation, alongside where it begins and ends.

This data type is used in the following API operations:

", "sensitive":true }, + "TextToSqlConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "knowledgeBaseConfiguration":{ + "shape":"TextToSqlKnowledgeBaseConfiguration", + "documentation":"

Specifies configurations for a knowledge base to use in transformation.

" + }, + "type":{ + "shape":"TextToSqlConfigurationType", + "documentation":"

The type of resource to use in transformation.

" + } + }, + "documentation":"

Contains configurations for transforming text to SQL.

" + }, + "TextToSqlConfigurationType":{ + "type":"string", + "enum":["KNOWLEDGE_BASE"] + }, + "TextToSqlKnowledgeBaseConfiguration":{ + "type":"structure", + "required":["knowledgeBaseArn"], + "members":{ + "knowledgeBaseArn":{ + "shape":"KnowledgeBaseArn", + "documentation":"

The ARN of the knowledge base.

" + } + }, + "documentation":"

Contains configurations for a knowledge base to use in transformation.

" + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -5143,6 +5367,21 @@ "event":true, "sensitive":true }, + "TransformationConfiguration":{ + "type":"structure", + "required":["mode"], + "members":{ + "mode":{ + "shape":"QueryTransformationMode", + "documentation":"

The mode of the transformation.

" + }, + "textToSqlConfiguration":{ + "shape":"TextToSqlConfiguration", + "documentation":"

Specifies configurations for transforming text to SQL.

" + } + }, + "documentation":"

Contains configurations for transforming the natural language query into SQL.

" + }, "Type":{ "type":"string", "enum":[ diff --git a/botocore/data/bedrock-agent/2023-06-05/service-2.json b/botocore/data/bedrock-agent/2023-06-05/service-2.json index 32fa7c7b13..a163630509 100644 --- a/botocore/data/bedrock-agent/2023-06-05/service-2.json +++ b/botocore/data/bedrock-agent/2023-06-05/service-2.json @@ -2397,12 +2397,34 @@ }, "documentation":"

Defines tools. The model automatically decides whether to call a tool or to generate text instead. For more information, see Use a tool to complete an Amazon Bedrock model response.

" }, + "AwsDataCatalogTableName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^.*\\.*$" + }, + "AwsDataCatalogTableNames":{ + "type":"list", + "member":{"shape":"AwsDataCatalogTableName"}, + "max":1000, + "min":1 + }, "BasePromptTemplate":{ "type":"string", "max":100000, "min":1, "sensitive":true }, + "BedrockDataAutomationConfiguration":{ + "type":"structure", + "members":{ + "parsingModality":{ + "shape":"ParsingModality", + "documentation":"

Specifies whether to enable parsing of multimodal data, including text and/or images.

" + } + }, + "documentation":"

Contains configurations for using Amazon Bedrock Data Automation as the parser for ingesting your data sources.

" + }, "BedrockEmbeddingModelArn":{ "type":"string", "max":2048, @@ -2429,7 +2451,11 @@ "members":{ "modelArn":{ "shape":"BedrockModelArn", - "documentation":"

The ARN of the foundation model or inference profile.

" + "documentation":"

The ARN of the foundation model or inference profile to use for parsing.

" + }, + "parsingModality":{ + "shape":"ParsingModality", + "documentation":"

Specifies whether to enable parsing of multimodal data, including text and/or images.

" }, "parsingPrompt":{ "shape":"ParsingPrompt", @@ -3201,8 +3227,7 @@ "required":[ "knowledgeBaseConfiguration", "name", - "roleArn", - "storageConfiguration" + "roleArn" ], "members":{ "clientToken":{ @@ -3419,6 +3444,30 @@ "OVERRIDDEN" ] }, + "CuratedQueries":{ + "type":"list", + "member":{"shape":"CuratedQuery"}, + "max":10, + "min":0 + }, + "CuratedQuery":{ + "type":"structure", + "required":[ + "naturalLanguage", + "sql" + ], + "members":{ + "naturalLanguage":{ + "shape":"NaturalLanguageString", + "documentation":"

An example natural language query.

" + }, + "sql":{ + "shape":"SqlString", + "documentation":"

The SQL equivalent of the natural language query.

" + } + }, + "documentation":"

Contains configurations for a query, each of which defines information about example queries to help the query engine generate appropriate SQL queries.

" + }, "CustomContent":{ "type":"structure", "required":[ @@ -3690,7 +3739,8 @@ "CONFLUENCE", "SALESFORCE", "SHAREPOINT", - "CUSTOM" + "CUSTOM", + "REDSHIFT_METADATA" ] }, "DateTimestamp":{ @@ -4122,6 +4172,11 @@ "max":200, "min":1 }, + "DescriptionString":{ + "type":"string", + "max":200, + "min":1 + }, "Dimensions":{ "type":"integer", "box":true, @@ -5868,6 +5923,13 @@ "type":"string", "pattern":"^[0-9a-zA-Z]{10}$" }, + "IncludeExclude":{ + "type":"string", + "enum":[ + "INCLUDE", + "EXCLUDE" + ] + }, "IncompatibleConnectionDataTypeFlowValidationDetails":{ "type":"structure", "required":["connection"], @@ -6229,6 +6291,21 @@ }, "documentation":"

Contains configurations for an iterator node in a flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output.

The output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node.

" }, + "KendraIndexArn":{ + "type":"string", + "pattern":"^arn:aws(|-cn|-us-gov):kendra:[a-z0-9-]{1,20}:([0-9]{12}|):index/([a-zA-Z0-9][a-zA-Z0-9-]{35}|[a-zA-Z0-9][a-zA-Z0-9-]{35}-[a-zA-Z0-9][a-zA-Z0-9-]{35})$" + }, + "KendraKnowledgeBaseConfiguration":{ + "type":"structure", + "required":["kendraIndexArn"], + "members":{ + "kendraIndexArn":{ + "shape":"KendraIndexArn", + "documentation":"

The ARN of the Amazon Kendra index.

" + } + }, + "documentation":"
Settings for an Amazon Kendra knowledge base.
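A minimal sketch, assuming boto3's create_knowledge_base, of creating a knowledge base backed by a Kendra GenAI index; the role ARN and index ARN are placeholders:

```python
# Hedged sketch using placeholder ARNs; adjust the region, role, and index to your account.
import boto3

agent = boto3.client("bedrock-agent")

response = agent.create_knowledge_base(
    name="kendra-genai-kb",
    roleArn="arn:aws:iam::111122223333:role/BedrockKnowledgeBaseRole",  # placeholder service role
    knowledgeBaseConfiguration={
        "type": "KENDRA",
        "kendraKnowledgeBaseConfiguration": {
            # Placeholder ARN of an existing Kendra GenAI index.
            "kendraIndexArn": "arn:aws:kendra:us-east-1:111122223333:index/11111111-2222-3333-4444-555555555555"
        },
    },
)

print(response["knowledgeBase"]["knowledgeBaseId"])
```

Note that this diff also drops storageConfiguration from the operation's required members, which is what allows KENDRA and SQL knowledge bases to be created without a vector store.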
" + }, "Key":{ "type":"string", "max":200, @@ -6251,7 +6328,6 @@ "name", "roleArn", "status", - "storageConfiguration", "updatedAt" ], "members":{ @@ -6312,6 +6388,14 @@ "type":"structure", "required":["type"], "members":{ + "kendraKnowledgeBaseConfiguration":{ + "shape":"KendraKnowledgeBaseConfiguration", + "documentation":"

Settings for an Amazon Kendra knowledge base.

" + }, + "sqlKnowledgeBaseConfiguration":{ + "shape":"SqlKnowledgeBaseConfiguration", + "documentation":"

Specifies configurations for a knowledge base connected to an SQL database.

" + }, "type":{ "shape":"KnowledgeBaseType", "documentation":"

The type of data that the data source is converted into for the knowledge base.

" @@ -6487,7 +6571,11 @@ }, "KnowledgeBaseType":{ "type":"string", - "enum":["VECTOR"] + "enum":[ + "VECTOR", + "KENDRA", + "SQL" + ] }, "LambdaArn":{ "type":"string", @@ -7501,6 +7589,11 @@ "type":"string", "pattern":"^([0-9a-zA-Z][_-]?){1,100}$" }, + "NaturalLanguageString":{ + "type":"string", + "max":1000, + "min":1 + }, "NextToken":{ "type":"string", "max":2048, @@ -7639,16 +7732,24 @@ "type":"structure", "required":["parsingStrategy"], "members":{ + "bedrockDataAutomationConfiguration":{ + "shape":"BedrockDataAutomationConfiguration", + "documentation":"

If you specify BEDROCK_DATA_AUTOMATION as the parsing strategy for ingesting your data source, use this object to modify configurations for using the Amazon Bedrock Data Automation parser.

" + }, "bedrockFoundationModelConfiguration":{ "shape":"BedrockFoundationModelConfiguration", - "documentation":"

Settings for a foundation model used to parse documents for a data source.

" + "documentation":"

If you specify BEDROCK_FOUNDATION_MODEL as the parsing strategy for ingesting your data source, use this object to modify configurations for using a foundation model to parse documents.

" }, "parsingStrategy":{ "shape":"ParsingStrategy", "documentation":"

The parsing strategy for the data source.

" } }, - "documentation":"

Settings for parsing document contents. By default, the service converts the contents of each document into text before splitting it into chunks. To improve processing of PDF files with tables and images, you can configure the data source to convert the pages of text into images and use a model to describe the contents of each page.

To use a model to parse PDF documents, set the parsing strategy to BEDROCK_FOUNDATION_MODEL and specify the model or inference profile to use by ARN. You can also override the default parsing prompt with instructions for how to interpret images and tables in your documents. The following models are supported.

You can get the ARN of a model with the ListFoundationModels action. Standard model usage charges apply for the foundation model parsing strategy.

" + "documentation":"
Settings for parsing document contents. If you exclude this field, the default parser converts the contents of each document into text before splitting it into chunks. Specify the parsing strategy to use in the parsingStrategy field and include the relevant configuration, or omit it to use the Amazon Bedrock default parser. For more information, see Parsing options for your data source.
If you specify BEDROCK_DATA_AUTOMATION or BEDROCK_FOUNDATION_MODEL and it fails to parse a file, the Amazon Bedrock default parser will be used instead.
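A sketch of supplying the new BEDROCK_DATA_AUTOMATION strategy when creating a data source with boto3; the knowledge base ID, bucket, and names are placeholders:

```python
# Hedged sketch: the parsingConfiguration block from this model, passed through the
# existing create_data_source call. All identifiers are placeholders.
import boto3

agent = boto3.client("bedrock-agent")

agent.create_data_source(
    knowledgeBaseId="EXAMPLEKB0",  # placeholder
    name="multimodal-docs",
    dataSourceConfiguration={
        "type": "S3",
        "s3Configuration": {"bucketArn": "arn:aws:s3:::example-docs-bucket"},
    },
    vectorIngestionConfiguration={
        "parsingConfiguration": {
            "parsingStrategy": "BEDROCK_DATA_AUTOMATION",
            "bedrockDataAutomationConfiguration": {
                "parsingModality": "MULTIMODAL"  # parse text and images
            },
        }
    },
)
```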
" + }, + "ParsingModality":{ + "type":"string", + "enum":["MULTIMODAL"] }, "ParsingPrompt":{ "type":"structure", @@ -7668,7 +7769,10 @@ }, "ParsingStrategy":{ "type":"string", - "enum":["BEDROCK_FOUNDATION_MODEL"] + "enum":[ + "BEDROCK_FOUNDATION_MODEL", + "BEDROCK_DATA_AUTOMATION" + ] }, "PatternObjectFilter":{ "type":"structure", @@ -8257,6 +8361,105 @@ "min":1, "pattern":"^((([0-9a-zA-Z][_-]?){1,63})|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:provisioned-model/[a-z0-9]{12}))$" }, + "QueryEngineType":{ + "type":"string", + "enum":["REDSHIFT"] + }, + "QueryExecutionTimeoutSeconds":{ + "type":"integer", + "box":true, + "max":200, + "min":1 + }, + "QueryGenerationColumn":{ + "type":"structure", + "members":{ + "description":{ + "shape":"DescriptionString", + "documentation":"

A description of the column that helps the query engine understand the contents of the column.

" + }, + "inclusion":{ + "shape":"IncludeExclude", + "documentation":"

Specifies whether to include or exclude the column during query generation. If you specify EXCLUDE, the column will be ignored. If you specify INCLUDE, all other columns in the table will be ignored.

" + }, + "name":{ + "shape":"QueryGenerationColumnName", + "documentation":"

The name of the column for which the other fields in this object apply.

" + } + }, + "documentation":"

Contains information about a column in the current table for the query engine to consider.

" + }, + "QueryGenerationColumnName":{ + "type":"string", + "max":127, + "min":1 + }, + "QueryGenerationColumns":{ + "type":"list", + "member":{"shape":"QueryGenerationColumn"} + }, + "QueryGenerationConfiguration":{ + "type":"structure", + "members":{ + "executionTimeoutSeconds":{ + "shape":"QueryExecutionTimeoutSeconds", + "documentation":"

The time after which query generation will time out.

" + }, + "generationContext":{ + "shape":"QueryGenerationContext", + "documentation":"

Specifies configurations for context to use during query generation.

" + } + }, + "documentation":"
Contains configurations for query generation. For more information, see Build a knowledge base by connecting to a structured data source in the Amazon Bedrock User Guide.
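A sketch of a queryGenerationConfiguration value; the table and column names are invented for illustration, and per RedshiftConfiguration this block sits alongside queryEngineConfiguration and storageConfigurations:

```python
# Hedged sketch of the query generation context described by these shapes; example
# queries, table names, and column names are invented.
query_generation_configuration = {
    "executionTimeoutSeconds": 60,  # 1-200 seconds per QueryExecutionTimeoutSeconds
    "generationContext": {
        "curatedQueries": [
            {
                "naturalLanguage": "How many orders were placed last month?",
                "sql": "SELECT COUNT(*) FROM orders WHERE order_date >= DATE_TRUNC('month', CURRENT_DATE);",
            }
        ],
        "tables": [
            {
                # QueryGenerationTableName expects a three-part, dot-separated name.
                "name": "sales.public.orders",
                "description": "One row per customer order.",
                "inclusion": "INCLUDE",
                "columns": [
                    {
                        "name": "order_date",
                        "description": "Date the order was placed.",
                        "inclusion": "INCLUDE",
                    }
                ],
            }
        ],
    },
}
```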
" + }, + "QueryGenerationContext":{ + "type":"structure", + "members":{ + "curatedQueries":{ + "shape":"CuratedQueries", + "documentation":"

An array of objects, each of which defines information about example queries to help the query engine generate appropriate SQL queries.

" + }, + "tables":{ + "shape":"QueryGenerationTables", + "documentation":"

An array of objects, each of which defines information about a table in the database.

" + } + }, + "documentation":"

Contains configurations for context to use during query generation.

", + "sensitive":true + }, + "QueryGenerationTable":{ + "type":"structure", + "required":["name"], + "members":{ + "columns":{ + "shape":"QueryGenerationColumns", + "documentation":"

An array of objects, each of which defines information about a column in the table.

" + }, + "description":{ + "shape":"DescriptionString", + "documentation":"

A description of the table that helps the query engine understand the contents of the table.

" + }, + "inclusion":{ + "shape":"IncludeExclude", + "documentation":"

Specifies whether to include or exclude the table during query generation. If you specify EXCLUDE, the table will be ignored. If you specify INCLUDE, all other tables will be ignored.

" + }, + "name":{ + "shape":"QueryGenerationTableName", + "documentation":"

The name of the table for which the other fields in this object apply.

" + } + }, + "documentation":"

Contains information about a table for the query engine to consider.

" + }, + "QueryGenerationTableName":{ + "type":"string", + "pattern":"^.*\\..*\\..*$" + }, + "QueryGenerationTables":{ + "type":"list", + "member":{"shape":"QueryGenerationTable"}, + "max":50, + "min":0 + }, "RdsArn":{ "type":"string", "pattern":"^arn:aws(|-cn|-us-gov):rds:[a-zA-Z0-9-]*:[0-9]{12}:cluster:[a-zA-Z0-9-]{1,63}$" @@ -8408,6 +8611,203 @@ "min":0, "pattern":"^.*$" }, + "RedshiftClusterIdentifier":{ + "type":"string", + "max":63, + "min":1 + }, + "RedshiftConfiguration":{ + "type":"structure", + "required":[ + "queryEngineConfiguration", + "storageConfigurations" + ], + "members":{ + "queryEngineConfiguration":{ + "shape":"RedshiftQueryEngineConfiguration", + "documentation":"

Specifies configurations for an Amazon Redshift query engine.

" + }, + "queryGenerationConfiguration":{ + "shape":"QueryGenerationConfiguration", + "documentation":"

Specifies configurations for generating queries.

" + }, + "storageConfigurations":{ + "shape":"RedshiftQueryEngineStorageConfigurations", + "documentation":"

Specifies configurations for Amazon Redshift database storage.

" + } + }, + "documentation":"
Contains configurations for an Amazon Redshift database. For more information, see Build a knowledge base by connecting to a structured data source in the Amazon Bedrock User Guide.
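A minimal sketch, assuming boto3's create_knowledge_base, of wiring these shapes together for a Redshift Serverless workgroup with IAM authentication and Redshift managed storage; all ARNs and names are placeholders:

```python
# Hedged sketch of a structured-data (SQL) knowledge base configuration; placeholders only.
import boto3

agent = boto3.client("bedrock-agent")

agent.create_knowledge_base(
    name="structured-sales-kb",
    roleArn="arn:aws:iam::111122223333:role/BedrockKnowledgeBaseRole",  # placeholder
    knowledgeBaseConfiguration={
        "type": "SQL",
        "sqlKnowledgeBaseConfiguration": {
            "type": "REDSHIFT",
            "redshiftConfiguration": {
                "queryEngineConfiguration": {
                    "type": "SERVERLESS",
                    "serverlessConfiguration": {
                        # Placeholder Redshift Serverless workgroup ARN.
                        "workgroupArn": "arn:aws:redshift-serverless:us-east-1:111122223333:workgroup/11111111-2222-3333-4444-555555555555",
                        "authConfiguration": {"type": "IAM"},
                    },
                },
                "storageConfigurations": [
                    {
                        "type": "REDSHIFT",
                        "redshiftConfiguration": {"databaseName": "sales"},
                    }
                ],
            },
        },
    },
)
```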
" + }, + "RedshiftDatabase":{ + "type":"string", + "max":200, + "min":1 + }, + "RedshiftProvisionedAuthConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "databaseUser":{ + "shape":"String", + "documentation":"

The database username for authentication to an Amazon Redshift provisioned data warehouse.

" + }, + "type":{ + "shape":"RedshiftProvisionedAuthType", + "documentation":"

The type of authentication to use.

" + }, + "usernamePasswordSecretArn":{ + "shape":"SecretArn", + "documentation":"

The ARN of a Secrets Manager secret for authentication.

" + } + }, + "documentation":"

Contains configurations for authentication to an Amazon Redshift provisioned data warehouse. Specify the type of authentication to use in the type field and include the corresponding field. If you specify IAM authentication, you don't need to include another field.

" + }, + "RedshiftProvisionedAuthType":{ + "type":"string", + "enum":[ + "IAM", + "USERNAME_PASSWORD", + "USERNAME" + ] + }, + "RedshiftProvisionedConfiguration":{ + "type":"structure", + "required":[ + "authConfiguration", + "clusterIdentifier" + ], + "members":{ + "authConfiguration":{ + "shape":"RedshiftProvisionedAuthConfiguration", + "documentation":"

Specifies configurations for authentication to Amazon Redshift.

" + }, + "clusterIdentifier":{ + "shape":"RedshiftClusterIdentifier", + "documentation":"

The ID of the Amazon Redshift cluster.

" + } + }, + "documentation":"

Contains configurations for a provisioned Amazon Redshift query engine.

" + }, + "RedshiftQueryEngineAwsDataCatalogStorageConfiguration":{ + "type":"structure", + "required":["tableNames"], + "members":{ + "tableNames":{ + "shape":"AwsDataCatalogTableNames", + "documentation":"

A list of names of the tables to use.

" + } + }, + "documentation":"

Contains configurations for storage in Glue Data Catalog.

" + }, + "RedshiftQueryEngineConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "provisionedConfiguration":{ + "shape":"RedshiftProvisionedConfiguration", + "documentation":"

Specifies configurations for a provisioned Amazon Redshift query engine.

" + }, + "serverlessConfiguration":{ + "shape":"RedshiftServerlessConfiguration", + "documentation":"

Specifies configurations for a serverless Amazon Redshift query engine.

" + }, + "type":{ + "shape":"RedshiftQueryEngineType", + "documentation":"

The type of query engine.

" + } + }, + "documentation":"

Contains configurations for an Amazon Redshift query engine. Specify the type of query engine in type and include the corresponding field. For more information, see Build a knowledge base by connecting to a structured data source in the Amazon Bedrock User Guide.

" + }, + "RedshiftQueryEngineRedshiftStorageConfiguration":{ + "type":"structure", + "required":["databaseName"], + "members":{ + "databaseName":{ + "shape":"RedshiftDatabase", + "documentation":"

The name of the Amazon Redshift database.

" + } + }, + "documentation":"

Contains configurations for storage in Amazon Redshift.

" + }, + "RedshiftQueryEngineStorageConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "awsDataCatalogConfiguration":{ + "shape":"RedshiftQueryEngineAwsDataCatalogStorageConfiguration", + "documentation":"

Specifies configurations for storage in Glue Data Catalog.

" + }, + "redshiftConfiguration":{ + "shape":"RedshiftQueryEngineRedshiftStorageConfiguration", + "documentation":"

Specifies configurations for storage in Amazon Redshift.

" + }, + "type":{ + "shape":"RedshiftQueryEngineStorageType", + "documentation":"

The data storage service to use.

" + } + }, + "documentation":"

Contains configurations for Amazon Redshift data storage. Specify the data storage service to use in the type field and include the corresponding field. For more information, see Build a knowledge base by connecting to a structured data source in the Amazon Bedrock User Guide.

" + }, + "RedshiftQueryEngineStorageConfigurations":{ + "type":"list", + "member":{"shape":"RedshiftQueryEngineStorageConfiguration"}, + "max":1, + "min":1 + }, + "RedshiftQueryEngineStorageType":{ + "type":"string", + "enum":[ + "REDSHIFT", + "AWS_DATA_CATALOG" + ] + }, + "RedshiftQueryEngineType":{ + "type":"string", + "enum":[ + "SERVERLESS", + "PROVISIONED" + ] + }, + "RedshiftServerlessAuthConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"RedshiftServerlessAuthType", + "documentation":"

The type of authentication to use.

" + }, + "usernamePasswordSecretArn":{ + "shape":"SecretArn", + "documentation":"

The ARN of a Secrets Manager secret for authentication.

" + } + }, + "documentation":"

Specifies configurations for authentication to Amazon Redshift Serverless. Specify the type of authentication to use in the type field and include the corresponding field. If you specify IAM authentication, you don't need to include another field.

" + }, + "RedshiftServerlessAuthType":{ + "type":"string", + "enum":[ + "IAM", + "USERNAME_PASSWORD" + ] + }, + "RedshiftServerlessConfiguration":{ + "type":"structure", + "required":[ + "authConfiguration", + "workgroupArn" + ], + "members":{ + "authConfiguration":{ + "shape":"RedshiftServerlessAuthConfiguration", + "documentation":"

Specifies configurations for authentication to an Amazon Redshift Serverless workgroup.

" + }, + "workgroupArn":{ + "shape":"WorkgroupArn", + "documentation":"

The ARN of the Amazon Redshift workgroup.

" + } + }, + "documentation":"

Contains configurations for authentication to Amazon Redshift Serverless.

" + }, "RelayConversationHistory":{ "type":"string", "enum":[ @@ -8804,6 +9204,26 @@ }, "documentation":"

Defines a specific tool that the model must request. No text is generated but the results of tool use are sent back to the model to help generate a response. For more information, see Use a tool to complete an Amazon Bedrock model response.

" }, + "SqlKnowledgeBaseConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "redshiftConfiguration":{ + "shape":"RedshiftConfiguration", + "documentation":"

Specifies configurations for a knowledge base connected to an Amazon Redshift database.

" + }, + "type":{ + "shape":"QueryEngineType", + "documentation":"

The type of SQL database to connect to the knowledge base.

" + } + }, + "documentation":"

Contains configurations for a knowledge base connected to an SQL database. Specify the SQL database type in the type field and include the corresponding field. For more information, see Build a knowledge base by connecting to a structured data source in the Amazon Bedrock User Guide.

" + }, + "SqlString":{ + "type":"string", + "max":1000, + "min":1 + }, "StartIngestionJobRequest":{ "type":"structure", "required":[ @@ -8969,6 +9389,42 @@ "min":1, "sensitive":true }, + "SupplementalDataStorageConfiguration":{ + "type":"structure", + "required":["storageLocations"], + "members":{ + "storageLocations":{ + "shape":"SupplementalDataStorageLocations", + "documentation":"

A list of objects specifying storage locations for images extracted from multimodal documents in your data source.

" + } + }, + "documentation":"
Specifies configurations for the storage location of the images extracted from multimodal documents in your data source. These images can be retrieved and returned to the end user.
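A sketch of this block as it could appear inside a vector knowledge base configuration, assuming the referenced S3Location shape takes a uri member; the bucket is a placeholder:

```python
# Hedged sketch of a supplementalDataStorageConfiguration value; bucket name is a placeholder.
supplemental_data_storage_configuration = {
    "storageLocations": [
        {
            "type": "S3",
            # Images extracted during multimodal parsing are written here.
            "s3Location": {"uri": "s3://example-bucket/kb-extracted-images/"},
        }
    ]
}
```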
" + }, + "SupplementalDataStorageLocation":{ + "type":"structure", + "required":["type"], + "members":{ + "s3Location":{ + "shape":"S3Location", + "documentation":"

Contains information about the Amazon S3 location for the extracted images.

" + }, + "type":{ + "shape":"SupplementalDataStorageLocationType", + "documentation":"

Specifies the storage service used for this location.

" + } + }, + "documentation":"

Contains information about a storage location for images extracted from multimodal documents in your data source.

" + }, + "SupplementalDataStorageLocationType":{ + "type":"string", + "enum":["S3"] + }, + "SupplementalDataStorageLocations":{ + "type":"list", + "member":{"shape":"SupplementalDataStorageLocation"}, + "max":1, + "min":1 + }, "SystemContentBlock":{ "type":"structure", "members":{ @@ -9907,8 +10363,7 @@ "knowledgeBaseConfiguration", "knowledgeBaseId", "name", - "roleArn", - "storageConfiguration" + "roleArn" ], "members":{ "description":{ @@ -10122,7 +10577,7 @@ }, "parsingConfiguration":{ "shape":"ParsingConfiguration", - "documentation":"

A custom parser for data source documents.

" + "documentation":"

Configurations for a parser to use for parsing documents in your data source. If you exclude this field, the default parser will be used.

" } }, "documentation":"

Contains details about how to ingest the documents in a data source.

" @@ -10138,6 +10593,10 @@ "embeddingModelConfiguration":{ "shape":"EmbeddingModelConfiguration", "documentation":"

The embeddings model configuration details for the vector model used in Knowledge Base.

" + }, + "supplementalDataStorageConfiguration":{ + "shape":"SupplementalDataStorageConfiguration", + "documentation":"

If you include multimodal data from your data source, use this object to specify configurations for the storage location of the images extracted from your documents. These images can be retrieved and returned to the end user. They can also be used in generation when using RetrieveAndGenerate.

" } }, "documentation":"

Contains details about the model used to create vector embeddings for the knowledge base.

" @@ -10218,6 +10677,10 @@ } }, "documentation":"

The configuration of the URL/URLs for the web content that you want to crawl. You should be authorized to crawl the URLs.

" + }, + "WorkgroupArn":{ + "type":"string", + "pattern":"^(arn:(aws(-[a-z]+)*):redshift-serverless:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:workgroup/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$" } }, "documentation":"

Describes the API operations for creating and managing Amazon Bedrock agents.

" diff --git a/botocore/data/bedrock-data-automation-runtime/2024-06-13/endpoint-rule-set-1.json b/botocore/data/bedrock-data-automation-runtime/2024-06-13/endpoint-rule-set-1.json new file mode 100644 index 0000000000..388569c584 --- /dev/null +++ b/botocore/data/bedrock-data-automation-runtime/2024-06-13/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + 
"conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://bedrock-data-automation-runtime.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://bedrock-data-automation-runtime.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/bedrock-data-automation-runtime/2024-06-13/paginators-1.json b/botocore/data/bedrock-data-automation-runtime/2024-06-13/paginators-1.json new file mode 100644 index 0000000000..ea142457a6 --- /dev/null +++ b/botocore/data/bedrock-data-automation-runtime/2024-06-13/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/bedrock-data-automation-runtime/2024-06-13/service-2.json b/botocore/data/bedrock-data-automation-runtime/2024-06-13/service-2.json new file mode 100644 index 0000000000..859ea45c7f --- /dev/null +++ b/botocore/data/bedrock-data-automation-runtime/2024-06-13/service-2.json @@ -0,0 +1,394 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2024-06-13", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"bedrock-data-automation-runtime", + "jsonVersion":"1.1", + "protocol":"json", + "protocols":["json"], + "serviceFullName":"Runtime for Amazon Bedrock Data Automation", + "serviceId":"Bedrock Data Automation Runtime", + "signatureVersion":"v4", + "signingName":"bedrock", + "targetPrefix":"AmazonBedrockKeystoneRuntimeService", + "uid":"bedrock-data-automation-runtime-2024-06-13" + }, + "operations":{ + "GetDataAutomationStatus":{ + "name":"GetDataAutomationStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDataAutomationStatusRequest"}, + "output":{"shape":"GetDataAutomationStatusResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

API used to get data automation status.

" + }, + "InvokeDataAutomationAsync":{ + "name":"InvokeDataAutomationAsync", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InvokeDataAutomationAsyncRequest"}, + "output":{"shape":"InvokeDataAutomationAsyncResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"
Async API: Invoke data automation.
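A minimal sketch of the two runtime operations above, assuming a 'bedrock-data-automation-runtime' boto3 client with the usual snake_case method names; ARNs and S3 URIs are placeholders:

```python
# Hedged sketch: start an asynchronous data automation job, then poll its status.
import time

import boto3

bda = boto3.client("bedrock-data-automation-runtime")

job = bda.invoke_data_automation_async(
    inputConfiguration={"s3Uri": "s3://example-input-bucket/document.pdf"},
    outputConfiguration={"s3Uri": "s3://example-output-bucket/results/"},
    dataAutomationConfiguration={
        # Placeholder data automation project ARN; stage is LIVE or DEVELOPMENT.
        "dataAutomationArn": "arn:aws:bedrock:us-east-1:111122223333:data-automation-project/example-project",
        "stage": "LIVE",
    },
)

invocation_arn = job["invocationArn"]

# Poll GetDataAutomationStatus until the job reaches a terminal state.
while True:
    status = bda.get_data_automation_status(invocationArn=invocation_arn)
    if status["status"] in ("Success", "ServiceError", "ClientError"):
        break
    time.sleep(10)

print(status["status"], status.get("outputConfiguration", {}).get("s3Uri"))
```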
", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception will be thrown when the customer does not have access to the API.

", + "exception":true + }, + "AutomationJobStatus":{ + "type":"string", + "documentation":"

List of statuses supported by automation jobs.

", + "enum":[ + "Created", + "InProgress", + "Success", + "ServiceError", + "ClientError" + ] + }, + "Blueprint":{ + "type":"structure", + "required":["blueprintArn"], + "members":{ + "blueprintArn":{ + "shape":"BlueprintArn", + "documentation":"

Arn of blueprint.

" + }, + "version":{ + "shape":"BlueprintVersion", + "documentation":"

Version of blueprint.

" + }, + "stage":{ + "shape":"BlueprintStage", + "documentation":"

Stage of blueprint.

" + } + }, + "documentation":"

Structure for single blueprint entity.

" + }, + "BlueprintArn":{ + "type":"string", + "documentation":"

ARN of a blueprint.

", + "max":128, + "min":0, + "pattern":"arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:(aws|[0-9]{12}):blueprint/(bedrock-data-insights-public-[a-zA-Z0-9-_]{1,30}|bedrock-data-automation-public-[a-zA-Z0-9-_]{1,30}|[a-zA-Z0-9-]{12,36})" + }, + "BlueprintList":{ + "type":"list", + "member":{ + "shape":"Blueprint", + "documentation":"

Blueprint.

" + }, + "documentation":"

Blueprint list.

", + "min":1 + }, + "BlueprintStage":{ + "type":"string", + "documentation":"

Blueprint stage enum.

", + "enum":[ + "DEVELOPMENT", + "LIVE" + ] + }, + "BlueprintVersion":{ + "type":"string", + "documentation":"

Blueprint version.

", + "max":128, + "min":1, + "pattern":"[0-9]*" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "DataAutomationArn":{ + "type":"string", + "documentation":"

Data automation arn.

", + "max":128, + "min":1, + "pattern":"arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:(aws|[0-9]{12}):data-automation-project/[a-zA-Z0-9-_]+" + }, + "DataAutomationConfiguration":{ + "type":"structure", + "required":["dataAutomationArn"], + "members":{ + "dataAutomationArn":{ + "shape":"DataAutomationArn", + "documentation":"

Data automation arn.

" + }, + "stage":{ + "shape":"DataAutomationStage", + "documentation":"

Data automation stage.

" + } + }, + "documentation":"

Data automation configuration.

" + }, + "DataAutomationStage":{ + "type":"string", + "documentation":"

Data automation stage.

", + "enum":[ + "LIVE", + "DEVELOPMENT" + ] + }, + "EncryptionConfiguration":{ + "type":"structure", + "required":["kmsKeyId"], + "members":{ + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

KMS key id.

" + }, + "kmsEncryptionContext":{ + "shape":"EncryptionContextMap", + "documentation":"

KMS encryption context.

" + } + }, + "documentation":"

Encryption configuration.

" + }, + "EncryptionContextKey":{ + "type":"string", + "documentation":"

Encryption context key.

", + "max":2000, + "min":1, + "pattern":".*\\S.*" + }, + "EncryptionContextMap":{ + "type":"map", + "key":{"shape":"EncryptionContextKey"}, + "value":{"shape":"EncryptionContextValue"}, + "max":10, + "min":1 + }, + "EncryptionContextValue":{ + "type":"string", + "documentation":"

Encryption context value.

", + "max":2000, + "min":1, + "pattern":".*\\S.*" + }, + "EventBridgeConfiguration":{ + "type":"structure", + "required":["eventBridgeEnabled"], + "members":{ + "eventBridgeEnabled":{ + "shape":"Boolean", + "documentation":"

Event bridge flag.

" + } + }, + "documentation":"

Event bridge configuration.

" + }, + "GetDataAutomationStatusRequest":{ + "type":"structure", + "required":["invocationArn"], + "members":{ + "invocationArn":{ + "shape":"InvocationArn", + "documentation":"

Invocation arn.

" + } + }, + "documentation":"

Structure for request of GetDataAutomationStatus API.

" + }, + "GetDataAutomationStatusResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"AutomationJobStatus", + "documentation":"

Job Status.

" + }, + "errorType":{ + "shape":"String", + "documentation":"

Error Type.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

Error Message.

" + }, + "outputConfiguration":{ + "shape":"OutputConfiguration", + "documentation":"

Output configuration.

" + } + }, + "documentation":"

Response of GetDataAutomationStatus API.

" + }, + "IdempotencyToken":{ + "type":"string", + "documentation":"

Client-specified token used for idempotency checks.

", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, + "InputConfiguration":{ + "type":"structure", + "required":["s3Uri"], + "members":{ + "s3Uri":{ + "shape":"S3Uri", + "documentation":"

S3 uri.

" + } + }, + "documentation":"

Input configuration.

" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception is for any unexpected internal service errors.

", + "exception":true, + "fault":true + }, + "InvocationArn":{ + "type":"string", + "documentation":"

Invocation arn.

", + "max":128, + "min":1, + "pattern":"arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:[0-9]{12}:(insights-invocation|data-automation-invocation)/[a-zA-Z0-9-_]+" + }, + "InvokeDataAutomationAsyncRequest":{ + "type":"structure", + "required":[ + "inputConfiguration", + "outputConfiguration" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

Idempotency token.

", + "idempotencyToken":true + }, + "inputConfiguration":{ + "shape":"InputConfiguration", + "documentation":"

Input configuration.

" + }, + "outputConfiguration":{ + "shape":"OutputConfiguration", + "documentation":"

Output configuration.

" + }, + "dataAutomationConfiguration":{ + "shape":"DataAutomationConfiguration", + "documentation":"

Data automation configuration.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

Encryption configuration.

" + }, + "notificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

Notification configuration.

" + }, + "blueprints":{ + "shape":"BlueprintList", + "documentation":"

Blueprint list.

" + } + }, + "documentation":"

Invoke Data Automation Async Request

" + }, + "InvokeDataAutomationAsyncResponse":{ + "type":"structure", + "required":["invocationArn"], + "members":{ + "invocationArn":{ + "shape":"InvocationArn", + "documentation":"

ARN of the automation job

" + } + }, + "documentation":"

Invoke Data Automation Async Response

" + }, + "NonBlankString":{ + "type":"string", + "pattern":"[\\s\\S]*" + }, + "NonEmptyString":{ + "type":"string", + "pattern":".*\\S.*" + }, + "NotificationConfiguration":{ + "type":"structure", + "required":["eventBridgeConfiguration"], + "members":{ + "eventBridgeConfiguration":{ + "shape":"EventBridgeConfiguration", + "documentation":"

Event bridge configuration.

" + } + }, + "documentation":"

Notification configuration.

" + }, + "OutputConfiguration":{ + "type":"structure", + "required":["s3Uri"], + "members":{ + "s3Uri":{ + "shape":"S3Uri", + "documentation":"

S3 uri.

" + } + }, + "documentation":"

Output configuration.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception will be thrown when a resource provided by the customer is not found.

", + "exception":true + }, + "S3Uri":{ + "type":"string", + "documentation":"

A path in Amazon S3.

", + "max":1024, + "min":1, + "pattern":"s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]([^\\x00-\\x1F\\x7F\\{^}%`\\]\">\\[~<#|]*)?" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception will be thrown when service quota is exceeded.

", + "exception":true + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception will be thrown when the customer has reached the API TPS limit.

", + "exception":true + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception will be thrown when the customer provides invalid parameters.

", + "exception":true + } + }, + "documentation":"

Amazon Bedrock Keystone Runtime

" +} diff --git a/botocore/data/bedrock-data-automation/2023-07-26/endpoint-rule-set-1.json b/botocore/data/bedrock-data-automation/2023-07-26/endpoint-rule-set-1.json new file mode 100644 index 0000000000..436d8e002c --- /dev/null +++ b/botocore/data/bedrock-data-automation/2023-07-26/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://bedrock-data-automation-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": 
"booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://bedrock-data-automation-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://bedrock-data-automation.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://bedrock-data-automation.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/bedrock-data-automation/2023-07-26/paginators-1.json b/botocore/data/bedrock-data-automation/2023-07-26/paginators-1.json new file mode 100644 index 0000000000..741799cc0d --- /dev/null +++ b/botocore/data/bedrock-data-automation/2023-07-26/paginators-1.json @@ -0,0 +1,16 @@ +{ + "pagination": { + "ListBlueprints": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "blueprints" + }, + "ListDataAutomationProjects": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "projects" + } + } +} diff --git a/botocore/data/bedrock-data-automation/2023-07-26/service-2.json b/botocore/data/bedrock-data-automation/2023-07-26/service-2.json new file mode 100644 index 0000000000..d1f082192f --- /dev/null +++ b/botocore/data/bedrock-data-automation/2023-07-26/service-2.json @@ -0,0 +1,1247 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-07-26", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"bedrock-data-automation", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"Data Automation for Amazon Bedrock", + "serviceId":"Bedrock Data Automation", + "signatureVersion":"v4", + "signingName":"bedrock", + "uid":"bedrock-data-automation-2023-07-26" + }, + "operations":{ + "CreateBlueprint":{ + "name":"CreateBlueprint", + "http":{ + "method":"PUT", + "requestUri":"/blueprints/", + "responseCode":201 + }, + "input":{"shape":"CreateBlueprintRequest"}, + "output":{"shape":"CreateBlueprintResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + 
{"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates an Amazon Bedrock Keystone Blueprint
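A minimal sketch of calling this operation through boto3, assuming the new bedrock-data-automation client is available in this release; the blueprint name and schema contents are illustrative placeholders:

import json
import boto3

bda = boto3.client("bedrock-data-automation", region_name="us-west-2")

# Illustrative schema payload; the service expects the schema as a string.
schema = {"class": "invoice", "type": "object", "properties": {"total": {"type": "string"}}}

response = bda.create_blueprint(
    blueprintName="invoice-blueprint",   # 1-128 chars, [a-zA-Z0-9-_]
    type="DOCUMENT",                     # DOCUMENT or IMAGE
    blueprintStage="DEVELOPMENT",        # optional: DEVELOPMENT or LIVE
    schema=json.dumps(schema),
)
print(response["blueprint"]["blueprintArn"])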

", + "idempotent":true + }, + "CreateBlueprintVersion":{ + "name":"CreateBlueprintVersion", + "http":{ + "method":"POST", + "requestUri":"/blueprints/{blueprintArn}/versions/", + "responseCode":201 + }, + "input":{"shape":"CreateBlueprintVersionRequest"}, + "output":{"shape":"CreateBlueprintVersionResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Creates a new version of an existing Amazon Bedrock Keystone Blueprint

", + "idempotent":true + }, + "CreateDataAutomationProject":{ + "name":"CreateDataAutomationProject", + "http":{ + "method":"PUT", + "requestUri":"/data-automation-projects/", + "responseCode":201 + }, + "input":{"shape":"CreateDataAutomationProjectRequest"}, + "output":{"shape":"CreateDataAutomationProjectResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates an Amazon Bedrock Keystone DataAutomationProject
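A sketch of creating a project with a document standard-output configuration, built only from the shapes defined later in this file; the project name and the options that are enabled here are illustrative:

import boto3

bda = boto3.client("bedrock-data-automation", region_name="us-west-2")

response = bda.create_data_automation_project(
    projectName="receipts-project",
    projectStage="DEVELOPMENT",
    standardOutputConfiguration={
        "document": {
            "extraction": {
                "granularity": {"types": ["PAGE", "ELEMENT"]},
                "boundingBox": {"state": "ENABLED"},
            },
            "generativeField": {"state": "ENABLED"},
            "outputFormat": {
                "textFormat": {"types": ["MARKDOWN"]},
                "additionalFileFormat": {"state": "DISABLED"},
            },
        }
    },
)
print(response["projectArn"], response["status"])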

", + "idempotent":true + }, + "DeleteBlueprint":{ + "name":"DeleteBlueprint", + "http":{ + "method":"DELETE", + "requestUri":"/blueprints/{blueprintArn}/", + "responseCode":204 + }, + "input":{"shape":"DeleteBlueprintRequest"}, + "output":{"shape":"DeleteBlueprintResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes an existing Amazon Bedrock Keystone Blueprint

", + "idempotent":true + }, + "DeleteDataAutomationProject":{ + "name":"DeleteDataAutomationProject", + "http":{ + "method":"DELETE", + "requestUri":"/data-automation-projects/{projectArn}/", + "responseCode":204 + }, + "input":{"shape":"DeleteDataAutomationProjectRequest"}, + "output":{"shape":"DeleteDataAutomationProjectResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes an existing Amazon Bedrock Keystone DataAutomationProject

", + "idempotent":true + }, + "GetBlueprint":{ + "name":"GetBlueprint", + "http":{ + "method":"POST", + "requestUri":"/blueprints/{blueprintArn}/", + "responseCode":200 + }, + "input":{"shape":"GetBlueprintRequest"}, + "output":{"shape":"GetBlueprintResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets an existing Amazon Bedrock Keystone Blueprint

" + }, + "GetDataAutomationProject":{ + "name":"GetDataAutomationProject", + "http":{ + "method":"POST", + "requestUri":"/data-automation-projects/{projectArn}/", + "responseCode":200 + }, + "input":{"shape":"GetDataAutomationProjectRequest"}, + "output":{"shape":"GetDataAutomationProjectResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets an existing Amazon Bedrock Keystone DataAutomationProject

" + }, + "ListBlueprints":{ + "name":"ListBlueprints", + "http":{ + "method":"POST", + "requestUri":"/blueprints/", + "responseCode":200 + }, + "input":{"shape":"ListBlueprintsRequest"}, + "output":{"shape":"ListBlueprintsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists all existing Amazon Bedrock Keystone Blueprints
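Because this release also registers a ListBlueprints paginator (see paginators-1.json above), listing can be sketched as:

import boto3

bda = boto3.client("bedrock-data-automation", region_name="us-west-2")

paginator = bda.get_paginator("list_blueprints")           # wired to nextToken/maxResults/blueprints
for page in paginator.paginate(resourceOwner="ACCOUNT"):   # ACCOUNT or SERVICE
    for summary in page["blueprints"]:
        print(summary["blueprintArn"], summary.get("blueprintName"))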

" + }, + "ListDataAutomationProjects":{ + "name":"ListDataAutomationProjects", + "http":{ + "method":"POST", + "requestUri":"/data-automation-projects/", + "responseCode":200 + }, + "input":{"shape":"ListDataAutomationProjectsRequest"}, + "output":{"shape":"ListDataAutomationProjectsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists all existing Amazon Bedrock Keystone DataAutomationProjects

" + }, + "UpdateBlueprint":{ + "name":"UpdateBlueprint", + "http":{ + "method":"PUT", + "requestUri":"/blueprints/{blueprintArn}/", + "responseCode":200 + }, + "input":{"shape":"UpdateBlueprintRequest"}, + "output":{"shape":"UpdateBlueprintResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates an existing Amazon Bedrock Blueprint

", + "idempotent":true + }, + "UpdateDataAutomationProject":{ + "name":"UpdateDataAutomationProject", + "http":{ + "method":"PUT", + "requestUri":"/data-automation-projects/{projectArn}/", + "responseCode":200 + }, + "input":{"shape":"UpdateDataAutomationProjectRequest"}, + "output":{"shape":"UpdateDataAutomationProjectResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates an existing Amazon Bedrock DataAutomationProject

", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception is thrown when a request is denied due to insufficient access permissions

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AudioExtractionCategory":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"}, + "types":{"shape":"AudioExtractionCategoryTypes"} + }, + "documentation":"

Category of Audio Extraction

" + }, + "AudioExtractionCategoryType":{ + "type":"string", + "enum":[ + "AUDIO_CONTENT_MODERATION", + "CHAPTER_CONTENT_MODERATION", + "TRANSCRIPT" + ] + }, + "AudioExtractionCategoryTypes":{ + "type":"list", + "member":{"shape":"AudioExtractionCategoryType"}, + "documentation":"

List of Audio Extraction Category Type

" + }, + "AudioStandardExtraction":{ + "type":"structure", + "required":["category"], + "members":{ + "category":{"shape":"AudioExtractionCategory"} + }, + "documentation":"

Standard Extraction Configuration of Audio

" + }, + "AudioStandardGenerativeField":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"}, + "types":{"shape":"AudioStandardGenerativeFieldTypes"} + }, + "documentation":"

Standard Generative Field Configuration of Audio

" + }, + "AudioStandardGenerativeFieldType":{ + "type":"string", + "enum":[ + "AUDIO_SUMMARY", + "CHAPTER_SUMMARY", + "IAB" + ] + }, + "AudioStandardGenerativeFieldTypes":{ + "type":"list", + "member":{"shape":"AudioStandardGenerativeFieldType"}, + "documentation":"

List of Audio Standard Generative Field Type

" + }, + "AudioStandardOutputConfiguration":{ + "type":"structure", + "members":{ + "extraction":{"shape":"AudioStandardExtraction"}, + "generativeField":{"shape":"AudioStandardGenerativeField"} + }, + "documentation":"

Standard Output Configuration of Audio

" + }, + "Blueprint":{ + "type":"structure", + "required":[ + "blueprintArn", + "schema", + "type", + "creationTime", + "lastModifiedTime", + "blueprintName" + ], + "members":{ + "blueprintArn":{"shape":"BlueprintArn"}, + "schema":{"shape":"BlueprintSchema"}, + "type":{"shape":"Type"}, + "creationTime":{"shape":"DateTimestamp"}, + "lastModifiedTime":{"shape":"DateTimestamp"}, + "blueprintName":{"shape":"BlueprintName"}, + "blueprintVersion":{"shape":"BlueprintVersion"}, + "blueprintStage":{"shape":"BlueprintStage"}, + "kmsKeyId":{"shape":"KmsKeyId"}, + "kmsEncryptionContext":{"shape":"KmsEncryptionContext"} + }, + "documentation":"

Contains the information of a Blueprint.

" + }, + "BlueprintArn":{ + "type":"string", + "documentation":"

ARN of a Blueprint

", + "max":128, + "min":0, + "pattern":"arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:(aws|[0-9]{12}):blueprint/(bedrock-data-automation-public-[a-zA-Z0-9-_]{1,30}|[a-zA-Z0-9-]{12,36})" + }, + "BlueprintFilter":{ + "type":"structure", + "required":["blueprintArn"], + "members":{ + "blueprintArn":{"shape":"BlueprintArn"}, + "blueprintVersion":{"shape":"BlueprintVersion"}, + "blueprintStage":{"shape":"BlueprintStage"} + }, + "documentation":"

Blueprint Filter

" + }, + "BlueprintItem":{ + "type":"structure", + "required":["blueprintArn"], + "members":{ + "blueprintArn":{"shape":"BlueprintArn"}, + "blueprintVersion":{"shape":"BlueprintVersion"}, + "blueprintStage":{"shape":"BlueprintStage"} + }, + "documentation":"

Blueprint Item

" + }, + "BlueprintItems":{ + "type":"list", + "member":{"shape":"BlueprintItem"}, + "documentation":"

List of Blueprint Item

" + }, + "BlueprintName":{ + "type":"string", + "documentation":"

Name of the Blueprint

", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9-_]+", + "sensitive":true + }, + "BlueprintSchema":{ + "type":"string", + "documentation":"

Schema of the blueprint

", + "max":100000, + "min":1, + "pattern":"[a-zA-Z0-9\\s!\"\\#\\$%'&\\(\\)\\*\\+\\,\\-\\./:;=\\?@\\[\\\\\\]\\^_`\\{\\|\\}~><]+", + "sensitive":true + }, + "BlueprintStage":{ + "type":"string", + "documentation":"

Stage of the Blueprint

", + "enum":[ + "DEVELOPMENT", + "LIVE" + ] + }, + "BlueprintStageFilter":{ + "type":"string", + "documentation":"

Blueprint Stage filter

", + "enum":[ + "DEVELOPMENT", + "LIVE", + "ALL" + ] + }, + "BlueprintSummary":{ + "type":"structure", + "required":[ + "blueprintArn", + "creationTime" + ], + "members":{ + "blueprintArn":{"shape":"BlueprintArn"}, + "blueprintVersion":{"shape":"BlueprintVersion"}, + "blueprintStage":{"shape":"BlueprintStage"}, + "blueprintName":{"shape":"BlueprintName"}, + "creationTime":{"shape":"DateTimestamp"}, + "lastModifiedTime":{"shape":"DateTimestamp"} + }, + "documentation":"

Summary of a Blueprint

" + }, + "BlueprintVersion":{ + "type":"string", + "documentation":"

Blueprint Version

", + "max":128, + "min":1, + "pattern":"[0-9]*" + }, + "Blueprints":{ + "type":"list", + "member":{"shape":"BlueprintSummary"}, + "documentation":"

List of Blueprints

" + }, + "ClientToken":{ + "type":"string", + "documentation":"

Client-specified token used for idempotency checks

", + "max":256, + "min":33, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception is thrown when there is a conflict performing an operation

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateBlueprintRequest":{ + "type":"structure", + "required":[ + "blueprintName", + "type", + "schema" + ], + "members":{ + "blueprintName":{"shape":"BlueprintName"}, + "type":{"shape":"Type"}, + "blueprintStage":{"shape":"BlueprintStage"}, + "schema":{"shape":"BlueprintSchema"}, + "clientToken":{ + "shape":"ClientToken", + "idempotencyToken":true + }, + "encryptionConfiguration":{"shape":"EncryptionConfiguration"} + }, + "documentation":"

Create Blueprint Request

" + }, + "CreateBlueprintResponse":{ + "type":"structure", + "required":["blueprint"], + "members":{ + "blueprint":{"shape":"Blueprint"} + }, + "documentation":"

Create Blueprint Response

" + }, + "CreateBlueprintVersionRequest":{ + "type":"structure", + "required":["blueprintArn"], + "members":{ + "blueprintArn":{ + "shape":"BlueprintArn", + "documentation":"

ARN generated at the server side when a Blueprint is created

", + "location":"uri", + "locationName":"blueprintArn" + }, + "clientToken":{ + "shape":"ClientToken", + "idempotencyToken":true + } + }, + "documentation":"

Create Blueprint Version Request

" + }, + "CreateBlueprintVersionResponse":{ + "type":"structure", + "required":["blueprint"], + "members":{ + "blueprint":{"shape":"Blueprint"} + }, + "documentation":"

Create Blueprint Version Response

" + }, + "CreateDataAutomationProjectRequest":{ + "type":"structure", + "required":[ + "projectName", + "standardOutputConfiguration" + ], + "members":{ + "projectName":{"shape":"DataAutomationProjectName"}, + "projectDescription":{"shape":"DataAutomationProjectDescription"}, + "projectStage":{"shape":"DataAutomationProjectStage"}, + "standardOutputConfiguration":{"shape":"StandardOutputConfiguration"}, + "customOutputConfiguration":{"shape":"CustomOutputConfiguration"}, + "overrideConfiguration":{"shape":"OverrideConfiguration"}, + "clientToken":{ + "shape":"ClientToken", + "idempotencyToken":true + }, + "encryptionConfiguration":{"shape":"EncryptionConfiguration"} + }, + "documentation":"

Create DataAutomationProject Request

" + }, + "CreateDataAutomationProjectResponse":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{"shape":"DataAutomationProjectArn"}, + "projectStage":{"shape":"DataAutomationProjectStage"}, + "status":{"shape":"DataAutomationProjectStatus"} + }, + "documentation":"

Create DataAutomationProject Response

" + }, + "CustomOutputConfiguration":{ + "type":"structure", + "members":{ + "blueprints":{"shape":"BlueprintItems"} + }, + "documentation":"

Custom output configuration

" + }, + "DataAutomationProject":{ + "type":"structure", + "required":[ + "projectArn", + "creationTime", + "lastModifiedTime", + "projectName", + "status" + ], + "members":{ + "projectArn":{"shape":"DataAutomationProjectArn"}, + "creationTime":{"shape":"DateTimestamp"}, + "lastModifiedTime":{"shape":"DateTimestamp"}, + "projectName":{"shape":"DataAutomationProjectName"}, + "projectStage":{"shape":"DataAutomationProjectStage"}, + "projectDescription":{"shape":"DataAutomationProjectDescription"}, + "standardOutputConfiguration":{"shape":"StandardOutputConfiguration"}, + "customOutputConfiguration":{"shape":"CustomOutputConfiguration"}, + "overrideConfiguration":{"shape":"OverrideConfiguration"}, + "status":{"shape":"DataAutomationProjectStatus"}, + "kmsKeyId":{"shape":"KmsKeyId"}, + "kmsEncryptionContext":{"shape":"KmsEncryptionContext"} + }, + "documentation":"

Contains the information of a DataAutomationProject.

" + }, + "DataAutomationProjectArn":{ + "type":"string", + "documentation":"

ARN of a DataAutomationProject

", + "max":128, + "min":0, + "pattern":"arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:(aws|[0-9]{12}):data-automation-project/[a-zA-Z0-9-]{12,36}" + }, + "DataAutomationProjectDescription":{ + "type":"string", + "documentation":"

Description of the DataAutomationProject

", + "sensitive":true + }, + "DataAutomationProjectFilter":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{"shape":"DataAutomationProjectArn"}, + "projectStage":{"shape":"DataAutomationProjectStage"} + }, + "documentation":"

Data Automation Project Filter

" + }, + "DataAutomationProjectName":{ + "type":"string", + "documentation":"

Name of the DataAutomationProject

", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9-_]+", + "sensitive":true + }, + "DataAutomationProjectStage":{ + "type":"string", + "documentation":"

Stage of the Project

", + "enum":[ + "DEVELOPMENT", + "LIVE" + ] + }, + "DataAutomationProjectStageFilter":{ + "type":"string", + "documentation":"

Project Stage filter

", + "enum":[ + "DEVELOPMENT", + "LIVE", + "ALL" + ] + }, + "DataAutomationProjectStatus":{ + "type":"string", + "documentation":"

Status of Data Automation Project

", + "enum":[ + "COMPLETED", + "IN_PROGRESS", + "FAILED" + ] + }, + "DataAutomationProjectSummaries":{ + "type":"list", + "member":{"shape":"DataAutomationProjectSummary"}, + "documentation":"

List of DataAutomationProjectSummary

" + }, + "DataAutomationProjectSummary":{ + "type":"structure", + "required":[ + "projectArn", + "creationTime" + ], + "members":{ + "projectArn":{"shape":"DataAutomationProjectArn"}, + "projectStage":{"shape":"DataAutomationProjectStage"}, + "projectName":{"shape":"DataAutomationProjectName"}, + "creationTime":{"shape":"DateTimestamp"} + }, + "documentation":"

Summary of a DataAutomationProject

" + }, + "DateTimestamp":{ + "type":"timestamp", + "documentation":"

Time Stamp

", + "timestampFormat":"iso8601" + }, + "DeleteBlueprintRequest":{ + "type":"structure", + "required":["blueprintArn"], + "members":{ + "blueprintArn":{ + "shape":"BlueprintArn", + "documentation":"

ARN generated at the server side when a Blueprint is created

", + "location":"uri", + "locationName":"blueprintArn" + }, + "blueprintVersion":{ + "shape":"BlueprintVersion", + "documentation":"

Optional field to delete a specific Blueprint version

", + "location":"querystring", + "locationName":"blueprintVersion" + } + }, + "documentation":"

Delete Blueprint Request

" + }, + "DeleteBlueprintResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

Delete Blueprint Response

" + }, + "DeleteDataAutomationProjectRequest":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{ + "shape":"DataAutomationProjectArn", + "documentation":"

ARN generated at the server side when a DataAutomationProject is created

", + "location":"uri", + "locationName":"projectArn" + } + }, + "documentation":"

Delete DataAutomationProject Request

" + }, + "DeleteDataAutomationProjectResponse":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{"shape":"DataAutomationProjectArn"}, + "status":{"shape":"DataAutomationProjectStatus"} + }, + "documentation":"

Delete DataAutomationProject Response

" + }, + "DocumentBoundingBox":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"} + }, + "documentation":"

Bounding Box Configuration of Document Extraction

" + }, + "DocumentExtractionGranularity":{ + "type":"structure", + "members":{ + "types":{"shape":"DocumentExtractionGranularityTypes"} + }, + "documentation":"

Granularity of Document Extraction

" + }, + "DocumentExtractionGranularityType":{ + "type":"string", + "enum":[ + "DOCUMENT", + "PAGE", + "ELEMENT", + "WORD", + "LINE" + ] + }, + "DocumentExtractionGranularityTypes":{ + "type":"list", + "member":{"shape":"DocumentExtractionGranularityType"}, + "documentation":"

List of Document Extraction Granularity Type

" + }, + "DocumentOutputAdditionalFileFormat":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"} + }, + "documentation":"

Additional File Format of Document Output

" + }, + "DocumentOutputFormat":{ + "type":"structure", + "required":[ + "textFormat", + "additionalFileFormat" + ], + "members":{ + "textFormat":{"shape":"DocumentOutputTextFormat"}, + "additionalFileFormat":{"shape":"DocumentOutputAdditionalFileFormat"} + }, + "documentation":"

Output Format of Document

" + }, + "DocumentOutputTextFormat":{ + "type":"structure", + "members":{ + "types":{"shape":"DocumentOutputTextFormatTypes"} + }, + "documentation":"

Text Format of Document Output

" + }, + "DocumentOutputTextFormatType":{ + "type":"string", + "enum":[ + "PLAIN_TEXT", + "MARKDOWN", + "HTML", + "CSV" + ] + }, + "DocumentOutputTextFormatTypes":{ + "type":"list", + "member":{"shape":"DocumentOutputTextFormatType"}, + "documentation":"

List of Document Output Text Format Type

" + }, + "DocumentOverrideConfiguration":{ + "type":"structure", + "members":{ + "splitter":{"shape":"SplitterConfiguration"} + }, + "documentation":"

Override Configuration of Document

" + }, + "DocumentStandardExtraction":{ + "type":"structure", + "required":[ + "granularity", + "boundingBox" + ], + "members":{ + "granularity":{"shape":"DocumentExtractionGranularity"}, + "boundingBox":{"shape":"DocumentBoundingBox"} + }, + "documentation":"

Standard Extraction Configuration of Document

" + }, + "DocumentStandardGenerativeField":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"} + }, + "documentation":"

Standard Generative Field Configuration of Document

" + }, + "DocumentStandardOutputConfiguration":{ + "type":"structure", + "members":{ + "extraction":{"shape":"DocumentStandardExtraction"}, + "generativeField":{"shape":"DocumentStandardGenerativeField"}, + "outputFormat":{"shape":"DocumentOutputFormat"} + }, + "documentation":"

Standard Output Configuration of Document

" + }, + "EncryptionConfiguration":{ + "type":"structure", + "required":["kmsKeyId"], + "members":{ + "kmsKeyId":{"shape":"KmsKeyId"}, + "kmsEncryptionContext":{"shape":"KmsEncryptionContext"} + }, + "documentation":"

KMS Encryption Configuration

" + }, + "GetBlueprintRequest":{ + "type":"structure", + "required":["blueprintArn"], + "members":{ + "blueprintArn":{ + "shape":"BlueprintArn", + "documentation":"

ARN generated at the server side when a Blueprint is created

", + "location":"uri", + "locationName":"blueprintArn" + }, + "blueprintVersion":{ + "shape":"BlueprintVersion", + "documentation":"

Optional field to get a specific Blueprint version

" + }, + "blueprintStage":{ + "shape":"BlueprintStage", + "documentation":"

Optional field to get a specific Blueprint stage

" + } + }, + "documentation":"

Get Blueprint Request

" + }, + "GetBlueprintResponse":{ + "type":"structure", + "required":["blueprint"], + "members":{ + "blueprint":{"shape":"Blueprint"} + }, + "documentation":"

Get Blueprint Response

" + }, + "GetDataAutomationProjectRequest":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{ + "shape":"DataAutomationProjectArn", + "documentation":"

ARN generated at the server side when a DataAutomationProject is created

", + "location":"uri", + "locationName":"projectArn" + }, + "projectStage":{ + "shape":"DataAutomationProjectStage", + "documentation":"

Optional field to get a specific DataAutomationProject stage

" + } + }, + "documentation":"

Get DataAutomationProject Request

" + }, + "GetDataAutomationProjectResponse":{ + "type":"structure", + "required":["project"], + "members":{ + "project":{"shape":"DataAutomationProject"} + }, + "documentation":"

Get DataAutomationProject Response

" + }, + "ImageBoundingBox":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"} + }, + "documentation":"

Bounding Box Configuration of Image Extraction

" + }, + "ImageExtractionCategory":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"}, + "types":{"shape":"ImageExtractionCategoryTypes"} + }, + "documentation":"

Category of Image Extraction

" + }, + "ImageExtractionCategoryType":{ + "type":"string", + "enum":[ + "CONTENT_MODERATION", + "TEXT_DETECTION" + ] + }, + "ImageExtractionCategoryTypes":{ + "type":"list", + "member":{"shape":"ImageExtractionCategoryType"}, + "documentation":"

List of Image Extraction Category

" + }, + "ImageStandardExtraction":{ + "type":"structure", + "required":[ + "category", + "boundingBox" + ], + "members":{ + "category":{"shape":"ImageExtractionCategory"}, + "boundingBox":{"shape":"ImageBoundingBox"} + }, + "documentation":"

Standard Extraction Configuration of Image

" + }, + "ImageStandardGenerativeField":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"}, + "types":{"shape":"ImageStandardGenerativeFieldTypes"} + }, + "documentation":"

Standard Generative Field Configuration of Image

" + }, + "ImageStandardGenerativeFieldType":{ + "type":"string", + "enum":[ + "IMAGE_SUMMARY", + "IAB" + ] + }, + "ImageStandardGenerativeFieldTypes":{ + "type":"list", + "member":{"shape":"ImageStandardGenerativeFieldType"}, + "documentation":"

List of Image Standard Generative Field Type

" + }, + "ImageStandardOutputConfiguration":{ + "type":"structure", + "members":{ + "extraction":{"shape":"ImageStandardExtraction"}, + "generativeField":{"shape":"ImageStandardGenerativeField"} + }, + "documentation":"

Standard Output Configuration of Image

" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception is thrown if there was an unexpected error during processing of the request

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "KmsEncryptionContext":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"}, + "documentation":"

KMS Encryption Context

", + "min":1 + }, + "KmsKeyId":{ + "type":"string", + "documentation":"

KMS Key Identifier

", + "max":2048, + "min":1 + }, + "ListBlueprintsRequest":{ + "type":"structure", + "members":{ + "blueprintArn":{"shape":"BlueprintArn"}, + "resourceOwner":{"shape":"ResourceOwner"}, + "blueprintStageFilter":{"shape":"BlueprintStageFilter"}, + "maxResults":{"shape":"MaxResults"}, + "nextToken":{"shape":"NextToken"}, + "projectFilter":{"shape":"DataAutomationProjectFilter"} + }, + "documentation":"

List Blueprint Request

" + }, + "ListBlueprintsResponse":{ + "type":"structure", + "required":["blueprints"], + "members":{ + "blueprints":{"shape":"Blueprints"}, + "nextToken":{"shape":"NextToken"} + }, + "documentation":"

List Blueprint Response

" + }, + "ListDataAutomationProjectsRequest":{ + "type":"structure", + "members":{ + "maxResults":{"shape":"MaxResults"}, + "nextToken":{"shape":"NextToken"}, + "projectStageFilter":{"shape":"DataAutomationProjectStageFilter"}, + "blueprintFilter":{"shape":"BlueprintFilter"}, + "resourceOwner":{"shape":"ResourceOwner"} + }, + "documentation":"

List DataAutomationProject Request

" + }, + "ListDataAutomationProjectsResponse":{ + "type":"structure", + "required":["projects"], + "members":{ + "projects":{"shape":"DataAutomationProjectSummaries"}, + "nextToken":{"shape":"NextToken"} + }, + "documentation":"

List DataAutomationProject Response

" + }, + "MaxResults":{ + "type":"integer", + "documentation":"

Max Results

", + "box":true, + "max":1000, + "min":1 + }, + "NextToken":{ + "type":"string", + "documentation":"

Pagination token

", + "max":2048, + "min":1, + "pattern":"\\S*" + }, + "NonBlankString":{ + "type":"string", + "documentation":"

Non Blank String

", + "pattern":"[\\s\\S]+" + }, + "OverrideConfiguration":{ + "type":"structure", + "members":{ + "document":{"shape":"DocumentOverrideConfiguration"} + }, + "documentation":"

Override configuration

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception is thrown when a resource referenced by the operation does not exist

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceOwner":{ + "type":"string", + "documentation":"

Resource Owner

", + "enum":[ + "SERVICE", + "ACCOUNT" + ] + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception is thrown when a request exceeds the service quota

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SplitterConfiguration":{ + "type":"structure", + "members":{ + "state":{"shape":"State"} + }, + "documentation":"

Configuration of Splitter

" + }, + "StandardOutputConfiguration":{ + "type":"structure", + "members":{ + "document":{"shape":"DocumentStandardOutputConfiguration"}, + "image":{"shape":"ImageStandardOutputConfiguration"}, + "video":{"shape":"VideoStandardOutputConfiguration"}, + "audio":{"shape":"AudioStandardOutputConfiguration"} + }, + "documentation":"

Standard output configuration

" + }, + "State":{ + "type":"string", + "documentation":"

State

", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

This exception is thrown when the number of requests exceeds the limit

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "Type":{ + "type":"string", + "documentation":"

Type

", + "enum":[ + "DOCUMENT", + "IMAGE" + ] + }, + "UpdateBlueprintRequest":{ + "type":"structure", + "required":[ + "blueprintArn", + "schema" + ], + "members":{ + "blueprintArn":{ + "shape":"BlueprintArn", + "documentation":"

ARN generated at the server side when a Blueprint is created

", + "location":"uri", + "locationName":"blueprintArn" + }, + "schema":{"shape":"BlueprintSchema"}, + "blueprintStage":{"shape":"BlueprintStage"} + }, + "documentation":"

Update Blueprint Request

" + }, + "UpdateBlueprintResponse":{ + "type":"structure", + "required":["blueprint"], + "members":{ + "blueprint":{"shape":"Blueprint"} + }, + "documentation":"

Update Blueprint Response

" + }, + "UpdateDataAutomationProjectRequest":{ + "type":"structure", + "required":[ + "projectArn", + "standardOutputConfiguration" + ], + "members":{ + "projectArn":{ + "shape":"DataAutomationProjectArn", + "documentation":"

ARN generated at the server side when a DataAutomationProject is created

", + "location":"uri", + "locationName":"projectArn" + }, + "projectStage":{"shape":"DataAutomationProjectStage"}, + "projectDescription":{"shape":"DataAutomationProjectDescription"}, + "standardOutputConfiguration":{"shape":"StandardOutputConfiguration"}, + "customOutputConfiguration":{"shape":"CustomOutputConfiguration"}, + "overrideConfiguration":{"shape":"OverrideConfiguration"} + }, + "documentation":"

Update DataAutomationProject Request

" + }, + "UpdateDataAutomationProjectResponse":{ + "type":"structure", + "required":["projectArn"], + "members":{ + "projectArn":{"shape":"DataAutomationProjectArn"}, + "projectStage":{"shape":"DataAutomationProjectStage"}, + "status":{"shape":"DataAutomationProjectStatus"} + }, + "documentation":"

Update DataAutomationProject Response

" + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"}, + "fieldList":{"shape":"ValidationExceptionFieldList"} + }, + "documentation":"

This exception is thrown when the request's input validation fails

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{"shape":"NonBlankString"}, + "message":{"shape":"NonBlankString"} + }, + "documentation":"

Stores information about a field passed inside a request that resulted in an exception

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"}, + "documentation":"

List of ValidationExceptionField

" + }, + "VideoBoundingBox":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"} + }, + "documentation":"

Bounding Box Configuration of Video Extraction

" + }, + "VideoExtractionCategory":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"}, + "types":{"shape":"VideoExtractionCategoryTypes"} + }, + "documentation":"

Category of Video Extraction

" + }, + "VideoExtractionCategoryType":{ + "type":"string", + "enum":[ + "CONTENT_MODERATION", + "TEXT_DETECTION", + "TRANSCRIPT" + ] + }, + "VideoExtractionCategoryTypes":{ + "type":"list", + "member":{"shape":"VideoExtractionCategoryType"}, + "documentation":"

List of Video Extraction Category Type

" + }, + "VideoStandardExtraction":{ + "type":"structure", + "required":[ + "category", + "boundingBox" + ], + "members":{ + "category":{"shape":"VideoExtractionCategory"}, + "boundingBox":{"shape":"VideoBoundingBox"} + }, + "documentation":"

Standard Extraction Configuration of Video

" + }, + "VideoStandardGenerativeField":{ + "type":"structure", + "required":["state"], + "members":{ + "state":{"shape":"State"}, + "types":{"shape":"VideoStandardGenerativeFieldTypes"} + }, + "documentation":"

Standard Generative Field Configuration of Video

" + }, + "VideoStandardGenerativeFieldType":{ + "type":"string", + "enum":[ + "VIDEO_SUMMARY", + "SCENE_SUMMARY", + "IAB" + ] + }, + "VideoStandardGenerativeFieldTypes":{ + "type":"list", + "member":{"shape":"VideoStandardGenerativeFieldType"}, + "documentation":"

List of Video Standard Generative Field Type

" + }, + "VideoStandardOutputConfiguration":{ + "type":"structure", + "members":{ + "extraction":{"shape":"VideoStandardExtraction"}, + "generativeField":{"shape":"VideoStandardGenerativeField"} + }, + "documentation":"

Standard Output Configuration of Video

" + } + }, + "documentation":"

Amazon Bedrock Keystone Build

" +} diff --git a/botocore/data/bedrock-runtime/2023-09-30/service-2.json b/botocore/data/bedrock-runtime/2023-09-30/service-2.json index a3f4d932c8..677a523eb4 100644 --- a/botocore/data/bedrock-runtime/2023-09-30/service-2.json +++ b/botocore/data/bedrock-runtime/2023-09-30/service-2.json @@ -529,7 +529,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+)))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|([a-zA-Z0-9-:.]+)|(^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10}(?::[0-9]{1,5})?))" + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+)))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|([a-zA-Z0-9-:.]+)|(^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10}(?::[0-9]{1,5})?))$|(^arn:aws:sagemaker:[a-z0-9-]+:[0-9]{12}:endpoint/[a-zA-Z0-9-]+$)|(^arn:aws(-[^:]+)?:bedrock:([0-9a-z-]{1,20}):([0-9]{12}):default-prompt-router/[a-zA-Z0-9-:.]+$)" }, "ConverseMetrics":{ "type":"structure", @@ -823,6 +823,10 @@ "guardrail":{ "shape":"GuardrailTraceAssessment", "documentation":"

The guardrail trace object.

" + }, + "promptRouter":{ + "shape":"PromptRouterTrace", + "documentation":"

The request's prompt router.

" } }, "documentation":"

The trace object in a response from ConverseStream. Currently, you can trace guardrails and prompt routers.

" @@ -833,6 +837,10 @@ "guardrail":{ "shape":"GuardrailTraceAssessment", "documentation":"

The guardrail trace object.

" + }, + "promptRouter":{ + "shape":"PromptRouterTrace", + "documentation":"

The request's prompt router.

" } }, "documentation":"

The trace object in a response from Converse. Currently, you can trace guardrails and prompt routers.

" @@ -1039,6 +1047,10 @@ "text":{ "shape":"GuardrailTextBlock", "documentation":"

Text within content block to be evaluated by the guardrail.

" + }, + "image":{ + "shape":"GuardrailImageBlock", + "documentation":"

Image within guardrail content block to be evaluated by the guardrail.

" } }, "documentation":"

The content block to be evaluated by the guardrail.

", @@ -1224,6 +1236,10 @@ "text":{ "shape":"GuardrailConverseTextBlock", "documentation":"

The text to guard.

" + }, + "image":{ + "shape":"GuardrailConverseImageBlock", + "documentation":"

Image within converse content block to be evaluated by the guardrail.

" } }, "documentation":"

A content block for selective guarding with the Converse or ConverseStream API operations.

", @@ -1241,6 +1257,48 @@ "type":"list", "member":{"shape":"GuardrailConverseContentQualifier"} }, + "GuardrailConverseImageBlock":{ + "type":"structure", + "required":[ + "format", + "source" + ], + "members":{ + "format":{ + "shape":"GuardrailConverseImageFormat", + "documentation":"

The format details for the image type of the guardrail converse image block.

" + }, + "source":{ + "shape":"GuardrailConverseImageSource", + "documentation":"

The image source (image bytes) of the guardrail converse image block.

" + } + }, + "documentation":"

An image block that contains images that you want to assess with a guardrail.

", + "sensitive":true + }, + "GuardrailConverseImageFormat":{ + "type":"string", + "enum":[ + "png", + "jpeg" + ] + }, + "GuardrailConverseImageSource":{ + "type":"structure", + "members":{ + "bytes":{ + "shape":"GuardrailConverseImageSourceBytesBlob", + "documentation":"

The raw image bytes for the image.

" + } + }, + "documentation":"

The image source (image bytes) of the guardrail converse image block.

", + "sensitive":true, + "union":true + }, + "GuardrailConverseImageSourceBytesBlob":{ + "type":"blob", + "min":1 + }, "GuardrailConverseTextBlock":{ "type":"structure", "required":["text"], @@ -1262,6 +1320,10 @@ "textCharacters":{ "shape":"GuardrailTextCharactersCoverage", "documentation":"

The text characters of the guardrail coverage details.

" + }, + "images":{ + "shape":"GuardrailImageCoverage", + "documentation":"

The guardrail coverage for images (the number of images that guardrails guarded).

" } }, "documentation":"

The action of the guardrail coverage details.

" @@ -1294,6 +1356,62 @@ "min":0, "pattern":"(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))" }, + "GuardrailImageBlock":{ + "type":"structure", + "required":[ + "format", + "source" + ], + "members":{ + "format":{ + "shape":"GuardrailImageFormat", + "documentation":"

The format details for the file type of the image blocked by the guardrail.

" + }, + "source":{ + "shape":"GuardrailImageSource", + "documentation":"

The image source (image bytes) details of the image blocked by the guardrail.

" + } + }, + "documentation":"

Contains an image that the user wants guarded. This block is accepted by the independent guardrails API.
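A sketch of passing an image through the independent guardrails API (ApplyGuardrail) with the new image content block; the guardrail identifier, version, and image file are placeholders:

import boto3

brt = boto3.client("bedrock-runtime", region_name="us-east-1")

with open("receipt.png", "rb") as f:   # illustrative local file
    image_bytes = f.read()

response = brt.apply_guardrail(
    guardrailIdentifier="abc12345wxyz",   # placeholder guardrail ID
    guardrailVersion="1",
    source="INPUT",
    content=[
        {"text": {"text": "Please review the attached receipt."}},
        {"image": {"format": "png", "source": {"bytes": image_bytes}}},
    ],
)
print(response["action"])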

", + "sensitive":true + }, + "GuardrailImageCoverage":{ + "type":"structure", + "members":{ + "guarded":{ + "shape":"ImagesGuarded", + "documentation":"

The number of images (integer) that the guardrail guarded.

" + }, + "total":{ + "shape":"ImagesTotal", + "documentation":"

The total number of images (integer) in the request, both guarded and unguarded.

" + } + }, + "documentation":"

The details of the guardrail image coverage.

" + }, + "GuardrailImageFormat":{ + "type":"string", + "enum":[ + "png", + "jpeg" + ] + }, + "GuardrailImageSource":{ + "type":"structure", + "members":{ + "bytes":{ + "shape":"GuardrailImageSourceBytesBlob", + "documentation":"

The raw image bytes of the guardrail image source. This object is used in the independent API.

" + } + }, + "documentation":"

The image source (image bytes) for the guardrail image block. This object is used in the independent API.

", + "sensitive":true, + "union":true + }, + "GuardrailImageSourceBytesBlob":{ + "type":"blob", + "min":1 + }, "GuardrailInvocationMetrics":{ "type":"structure", "members":{ @@ -1732,6 +1850,14 @@ "type":"blob", "min":1 }, + "ImagesGuarded":{ + "type":"integer", + "box":true + }, + "ImagesTotal":{ + "type":"integer", + "box":true + }, "InferenceConfiguration":{ "type":"structure", "members":{ @@ -1797,7 +1923,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+)))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|([a-zA-Z0-9-:.]+)$|(^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10}(?::[0-9]{1,5})?))" + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+)))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|([a-zA-Z0-9-:.]+)$|(^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:prompt/[0-9a-zA-Z]{10}(?::[0-9]{1,5})?))$|(^arn:aws:sagemaker:[a-z0-9-]+:[0-9]{12}:endpoint/[a-zA-Z0-9-]+$)|(^arn:aws(-[^:]+)?:bedrock:([0-9a-z-]{1,20}):([0-9]{12}):default-prompt-router/[a-zA-Z0-9-:.]+$)" }, "InvokeModelRequest":{ "type":"structure", @@ -1957,6 +2083,10 @@ }, "payload":"body" }, + "InvokedModelId":{ + "type":"string", + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2})|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):inference-profile/[a-zA-Z0-9-:.]+)" + }, "KmsKeyId":{ "type":"string", "max":2048, @@ -2214,6 +2344,16 @@ }, "documentation":"

Performance settings for a model.

" }, + "PromptRouterTrace":{ + "type":"structure", + "members":{ + "invokedModelId":{ + "shape":"InvokedModelId", + "documentation":"

The ID of the invoked model.

" + } + }, + "documentation":"

A prompt router trace.
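A sketch of reading this trace after routing a Converse request through a prompt router; the router ARN is a placeholder, and whether a trace is returned depends on the router handling the request:

import boto3

brt = boto3.client("bedrock-runtime", region_name="us-east-1")

router_arn = "arn:aws:bedrock:us-east-1:111122223333:default-prompt-router/example-router:1"

response = brt.converse(
    modelId=router_arn,   # default prompt router ARNs are now accepted as modelId
    messages=[{"role": "user", "content": [{"text": "Summarize RAG in one sentence."}]}],
)
trace = response.get("trace", {}).get("promptRouter", {})
print(trace.get("invokedModelId"))    # the model the router actually invoked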

" + }, "PromptVariableMap":{ "type":"map", "key":{"shape":"String"}, diff --git a/botocore/data/bedrock/2023-04-20/paginators-1.json b/botocore/data/bedrock/2023-04-20/paginators-1.json index 9f307472c1..561133f3fe 100644 --- a/botocore/data/bedrock/2023-04-20/paginators-1.json +++ b/botocore/data/bedrock/2023-04-20/paginators-1.json @@ -59,6 +59,18 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "inferenceProfileSummaries" + }, + "ListMarketplaceModelEndpoints": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "marketplaceModelEndpoints" + }, + "ListPromptRouters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "promptRouterSummaries" } } } diff --git a/botocore/data/bedrock/2023-04-20/service-2.json b/botocore/data/bedrock/2023-04-20/service-2.json index b4996d4117..11b217be24 100644 --- a/botocore/data/bedrock/2023-04-20/service-2.json +++ b/botocore/data/bedrock/2023-04-20/service-2.json @@ -116,6 +116,26 @@ "documentation":"

Creates an application inference profile to track metrics and costs when invoking a model. To create an application inference profile for a foundation model in one region, specify the ARN of the model in that region. To create an application inference profile for a foundation model across multiple regions, specify the ARN of the system-defined inference profile that contains the regions that you want to route requests to. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.

", "idempotent":true }, + "CreateMarketplaceModelEndpoint":{ + "name":"CreateMarketplaceModelEndpoint", + "http":{ + "method":"POST", + "requestUri":"/marketplace-model/endpoints", + "responseCode":200 + }, + "input":{"shape":"CreateMarketplaceModelEndpointRequest"}, + "output":{"shape":"CreateMarketplaceModelEndpointResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Creates an endpoint for a model from Amazon Bedrock Marketplace. The endpoint is hosted by Amazon SageMaker.

" + }, "CreateModelCopyJob":{ "name":"CreateModelCopyJob", "http":{ @@ -300,6 +320,25 @@ "documentation":"

Deletes an application inference profile. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.

", "idempotent":true }, + "DeleteMarketplaceModelEndpoint":{ + "name":"DeleteMarketplaceModelEndpoint", + "http":{ + "method":"DELETE", + "requestUri":"/marketplace-model/endpoints/{endpointArn}", + "responseCode":200 + }, + "input":{"shape":"DeleteMarketplaceModelEndpointRequest"}, + "output":{"shape":"DeleteMarketplaceModelEndpointResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes an endpoint for a model from Amazon Bedrock Marketplace.

", + "idempotent":true + }, "DeleteModelInvocationLoggingConfiguration":{ "name":"DeleteModelInvocationLoggingConfiguration", "http":{ @@ -337,6 +376,25 @@ "documentation":"

Deletes a Provisioned Throughput. You can't delete a Provisioned Throughput before the commitment term is over. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

", "idempotent":true }, + "DeregisterMarketplaceModelEndpoint":{ + "name":"DeregisterMarketplaceModelEndpoint", + "http":{ + "method":"DELETE", + "requestUri":"/marketplace-model/endpoints/{endpointArn}/registration", + "responseCode":200 + }, + "input":{"shape":"DeregisterMarketplaceModelEndpointRequest"}, + "output":{"shape":"DeregisterMarketplaceModelEndpointResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deregisters an endpoint for a model from Amazon Bedrock Marketplace. This operation removes the endpoint's association with Amazon Bedrock but does not delete the underlying Amazon SageMaker endpoint.

" + }, "GetCustomModel":{ "name":"GetCustomModel", "http":{ @@ -445,6 +503,24 @@ ], "documentation":"

Gets information about an inference profile. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.

" }, + "GetMarketplaceModelEndpoint":{ + "name":"GetMarketplaceModelEndpoint", + "http":{ + "method":"GET", + "requestUri":"/marketplace-model/endpoints/{endpointArn}", + "responseCode":200 + }, + "input":{"shape":"GetMarketplaceModelEndpointRequest"}, + "output":{"shape":"GetMarketplaceModelEndpointResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves details about a specific endpoint for a model from Amazon Bedrock Marketplace.

" + }, "GetModelCopyJob":{ "name":"GetModelCopyJob", "http":{ @@ -533,6 +609,24 @@ ], "documentation":"

Get the current configuration values for model invocation logging.

" }, + "GetPromptRouter":{ + "name":"GetPromptRouter", + "http":{ + "method":"GET", + "requestUri":"/prompt-routers/{promptRouterArn}", + "responseCode":200 + }, + "input":{"shape":"GetPromptRouterRequest"}, + "output":{"shape":"GetPromptRouterResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves details about a prompt router.

" + }, "GetProvisionedModelThroughput":{ "name":"GetProvisionedModelThroughput", "http":{ @@ -654,6 +748,24 @@ ], "documentation":"

Returns a list of inference profiles that you can use. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.

" }, + "ListMarketplaceModelEndpoints":{ + "name":"ListMarketplaceModelEndpoints", + "http":{ + "method":"GET", + "requestUri":"/marketplace-model/endpoints", + "responseCode":200 + }, + "input":{"shape":"ListMarketplaceModelEndpointsRequest"}, + "output":{"shape":"ListMarketplaceModelEndpointsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Lists the endpoints for models from Amazon Bedrock Marketplace in your Amazon Web Services account.
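With the ListMarketplaceModelEndpoints paginator registered above, enumerating and describing endpoints can be sketched as follows; the summary field name endpointArn is assumed for illustration:

import boto3

bedrock = boto3.client("bedrock", region_name="us-east-1")

paginator = bedrock.get_paginator("list_marketplace_model_endpoints")
for page in paginator.paginate():
    for summary in page["marketplaceModelEndpoints"]:
        # 'endpointArn' as a summary key is an assumption for this sketch.
        detail = bedrock.get_marketplace_model_endpoint(endpointArn=summary["endpointArn"])
        print(detail)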

" + }, "ListModelCopyJobs":{ "name":"ListModelCopyJobs", "http":{ @@ -723,6 +835,23 @@ ], "documentation":"

Lists all batch inference jobs in the account. For more information, see View details about a batch inference job.

" }, + "ListPromptRouters":{ + "name":"ListPromptRouters", + "http":{ + "method":"GET", + "requestUri":"/prompt-routers", + "responseCode":200 + }, + "input":{"shape":"ListPromptRoutersRequest"}, + "output":{"shape":"ListPromptRoutersResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a list of prompt routers.
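A sketch using the new ListPromptRouters paginator together with GetPromptRouter; the summary key promptRouterArn is assumed for illustration:

import boto3

bedrock = boto3.client("bedrock", region_name="us-east-1")

paginator = bedrock.get_paginator("list_prompt_routers")
for page in paginator.paginate():
    for router in page["promptRouterSummaries"]:
        # 'promptRouterArn' as a summary key is an assumption for this sketch.
        detail = bedrock.get_prompt_router(promptRouterArn=router["promptRouterArn"])
        print(detail)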

" + }, "ListProvisionedModelThroughputs":{ "name":"ListProvisionedModelThroughputs", "http":{ @@ -776,6 +905,25 @@ "documentation":"

Set the configuration values for model invocation logging.

", "idempotent":true }, + "RegisterMarketplaceModelEndpoint":{ + "name":"RegisterMarketplaceModelEndpoint", + "http":{ + "method":"POST", + "requestUri":"/marketplace-model/endpoints/{endpointIdentifier}/registration", + "responseCode":200 + }, + "input":{"shape":"RegisterMarketplaceModelEndpointRequest"}, + "output":{"shape":"RegisterMarketplaceModelEndpointResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Registers an existing Amazon SageMaker endpoint with Amazon Bedrock Marketplace, allowing it to be used with Amazon Bedrock APIs.

" + }, "StopEvaluationJob":{ "name":"StopEvaluationJob", "http":{ @@ -892,6 +1040,26 @@ "documentation":"

Updates a guardrail with the values you specify.

", "idempotent":true }, + "UpdateMarketplaceModelEndpoint":{ + "name":"UpdateMarketplaceModelEndpoint", + "http":{ + "method":"PATCH", + "requestUri":"/marketplace-model/endpoints/{endpointArn}", + "responseCode":200 + }, + "input":{"shape":"UpdateMarketplaceModelEndpointRequest"}, + "output":{"shape":"UpdateMarketplaceModelEndpointResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates the configuration of an existing endpoint for a model from Amazon Bedrock Marketplace.

" + }, "UpdateProvisionedModelThroughput":{ "name":"UpdateProvisionedModelThroughput", "http":{ @@ -913,6 +1081,7 @@ } }, "shapes":{ + "AcceptEula":{"type":"boolean"}, "AccessDeniedException":{ "type":"structure", "members":{ @@ -952,6 +1121,11 @@ "RagEvaluation" ] }, + "Arn":{ + "type":"string", + "max":2048, + "min":0 + }, "AutomatedEvaluationConfig":{ "type":"structure", "required":["datasetMetricConfigs"], @@ -1399,6 +1573,51 @@ } } }, + "CreateMarketplaceModelEndpointRequest":{ + "type":"structure", + "required":[ + "modelSourceIdentifier", + "endpointConfig", + "endpointName" + ], + "members":{ + "modelSourceIdentifier":{ + "shape":"ModelSourceIdentifier", + "documentation":"

The ARN of the model from Amazon Bedrock Marketplace that you want to deploy to the endpoint.

" + }, + "endpointConfig":{ + "shape":"EndpointConfig", + "documentation":"

The configuration for the endpoint, including the number and type of instances to use.

" + }, + "acceptEula":{ + "shape":"AcceptEula", + "documentation":"

Indicates whether you accept the end-user license agreement (EULA) for the model. Set to true to accept the EULA.

" + }, + "endpointName":{ + "shape":"EndpointName", + "documentation":"

The name of the endpoint. This name must be unique within your Amazon Web Services account and region.

" + }, + "clientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. This token is listed as not required because Amazon Web Services SDKs automatically generate it for you and set this parameter. If you're not using the Amazon Web Services SDK or the CLI, you must provide this token or the action will fail.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs to apply to the underlying Amazon SageMaker endpoint. You can use these tags to organize and identify your Amazon Web Services resources.
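Putting the request members above together, a hedged boto3 sketch of CreateMarketplaceModelEndpoint; every name and ARN is a placeholder, and clientRequestToken is omitted because the SDK generates it automatically.

```python
import boto3

bedrock = boto3.client("bedrock")

# Hypothetical sketch of CreateMarketplaceModelEndpoint; all identifiers are placeholders.
response = bedrock.create_marketplace_model_endpoint(
    modelSourceIdentifier=(
        "arn:aws:sagemaker:us-east-1:aws:hub-content/"
        "SageMakerPublicHub/Model/example-model/1.0.0"
    ),
    endpointName="example-marketplace-endpoint",  # must be unique in the account and Region
    acceptEula=True,                              # accept the model's EULA
    endpointConfig={
        "sageMaker": {
            "initialInstanceCount": 1,
            "instanceType": "ml.g5.2xlarge",
            "executionRole": "arn:aws:iam::111122223333:role/ExampleBedrockMarketplaceRole",
        }
    },
    tags=[{"key": "project", "value": "demo"}],
)
print(response["marketplaceModelEndpoint"]["endpointArn"])
```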

" + } + } + }, + "CreateMarketplaceModelEndpointResponse":{ + "type":"structure", + "required":["marketplaceModelEndpoint"], + "members":{ + "marketplaceModelEndpoint":{ + "shape":"MarketplaceModelEndpoint", + "documentation":"

Details about the created endpoint.

" + } + } + }, "CreateModelCopyJobRequest":{ "type":"structure", "required":[ @@ -1834,6 +2053,23 @@ "members":{ } }, + "DeleteMarketplaceModelEndpointRequest":{ + "type":"structure", + "required":["endpointArn"], + "members":{ + "endpointArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint you want to delete.

", + "location":"uri", + "locationName":"endpointArn" + } + } + }, + "DeleteMarketplaceModelEndpointResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteModelInvocationLoggingConfigurationRequest":{ "type":"structure", "members":{ @@ -1861,6 +2097,23 @@ "members":{ } }, + "DeregisterMarketplaceModelEndpointRequest":{ + "type":"structure", + "required":["endpointArn"], + "members":{ + "endpointArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint you want to deregister.

", + "location":"uri", + "locationName":"endpointArn" + } + } + }, + "DeregisterMarketplaceModelEndpointResponse":{ + "type":"structure", + "members":{ + } + }, "DistillationConfig":{ "type":"structure", "required":["teacherModelConfig"], @@ -1872,6 +2125,22 @@ }, "documentation":"

Settings for distilling a foundation model into a smaller and more efficient model.

" }, + "EndpointConfig":{ + "type":"structure", + "members":{ + "sageMaker":{ + "shape":"SageMakerEndpoint", + "documentation":"

The configuration specific to Amazon SageMaker for the endpoint.

" + } + }, + "documentation":"

Specifies the configuration for the endpoint.

", + "union":true + }, + "EndpointName":{ + "type":"string", + "max":30, + "min":1 + }, "ErrorMessage":{ "type":"string", "max":2048, @@ -2081,7 +2350,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:((:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:application-inference-profile/[a-z0-9]{12})|([0-9]{12}:inference-profile/(([a-z]{2}.)[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))))|(([a-z]{2}[.]{1})([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))" + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:((:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:application-inference-profile/[a-z0-9]{12})|([0-9]{12}:inference-profile/(([a-z-]{2,8}.)[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))|([0-9]{12}:default-prompt-router/[a-zA-Z0-9-:.]+)))|(([a-z]{2}[.]{1})([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|arn:aws(-[^:]+)?:sagemaker:[a-z0-9-]{1,20}:[0-9]{12}:endpoint/[a-z0-9-]{1,63}" }, "EvaluationModelIdentifiers":{ "type":"list", @@ -2888,6 +3157,27 @@ } } }, + "GetMarketplaceModelEndpointRequest":{ + "type":"structure", + "required":["endpointArn"], + "members":{ + "endpointArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint you want to get information about.

", + "location":"uri", + "locationName":"endpointArn" + } + } + }, + "GetMarketplaceModelEndpointResponse":{ + "type":"structure", + "members":{ + "marketplaceModelEndpoint":{ + "shape":"MarketplaceModelEndpoint", + "documentation":"

Details about the requested endpoint.
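A short hedged sketch of retrieving those details, assuming the generated client method name; the endpoint ARN is a placeholder.

```python
import boto3

# Hypothetical sketch: look up a registered Marketplace endpoint by ARN (placeholder ARN).
bedrock = boto3.client("bedrock")

endpoint = bedrock.get_marketplace_model_endpoint(
    endpointArn="arn:aws:sagemaker:us-east-1:111122223333:endpoint/my-endpoint"
)["marketplaceModelEndpoint"]

print(endpoint["status"], endpoint["endpointStatus"])
```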

" + } + } + }, "GetModelCopyJobRequest":{ "type":"structure", "required":["jobArn"], @@ -3241,6 +3531,72 @@ } } }, + "GetPromptRouterRequest":{ + "type":"structure", + "required":["promptRouterArn"], + "members":{ + "promptRouterArn":{ + "shape":"PromptRouterArn", + "documentation":"

The prompt router's ARN.


", + "location":"uri", + "locationName":"promptRouterArn" + } + } + }, + "GetPromptRouterResponse":{ + "type":"structure", + "required":[ + "promptRouterName", + "routingCriteria", + "promptRouterArn", + "models", + "fallbackModel", + "status", + "type" + ], + "members":{ + "promptRouterName":{ + "shape":"PromptRouterName", + "documentation":"

The router's name.

" + }, + "routingCriteria":{ + "shape":"RoutingCriteria", + "documentation":"

The router's routing criteria.

" + }, + "description":{ + "shape":"PromptRouterDescription", + "documentation":"

The router's description.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

When the router was created.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

When the router was updated.

" + }, + "promptRouterArn":{ + "shape":"PromptRouterArn", + "documentation":"

The prompt router's ARN.

" + }, + "models":{ + "shape":"PromptRouterTargetModels", + "documentation":"

The router's models.

" + }, + "fallbackModel":{ + "shape":"PromptRouterTargetModel", + "documentation":"

The router's fallback model.

" + }, + "status":{ + "shape":"PromptRouterStatus", + "documentation":"

The router's status.

" + }, + "type":{ + "shape":"PromptRouterType", + "documentation":"

The router's type.
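To make the response shape above concrete, a hedged boto3 sketch of GetPromptRouter; the Region and router ARN are placeholders.

```python
import boto3

# Hypothetical sketch: describe a default prompt router (placeholder ARN and Region).
bedrock = boto3.client("bedrock", region_name="us-east-1")

router = bedrock.get_prompt_router(
    promptRouterArn=(
        "arn:aws:bedrock:us-east-1:111122223333:"
        "default-prompt-router/example-router:1"
    )
)

print(router["promptRouterName"], router["status"], router["type"])
for model in router["models"]:
    print("candidate model:", model["modelArn"])
print("fallback model:", router["fallbackModel"]["modelArn"])
print("response quality difference:", router["routingCriteria"]["responseQualityDifference"])
```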

" + } + } + }, "GetProvisionedModelThroughputRequest":{ "type":"structure", "required":["provisionedModelId"], @@ -3383,6 +3739,14 @@ "outputStrength":{ "shape":"GuardrailFilterStrength", "documentation":"

The strength of the content filter to apply to model responses. As you increase the filter strength, the likelihood of filtering harmful content increases and the probability of seeing harmful content in your application reduces.

" + }, + "inputModalities":{ + "shape":"GuardrailModalities", + "documentation":"

The input modalities selected for the guardrail content filter.

" + }, + "outputModalities":{ + "shape":"GuardrailModalities", + "documentation":"

The output modalities selected for the guardrail content filter.

" } }, "documentation":"

Contains filter strengths for harmful content. Guardrails support the following content filters to detect and filter harmful user inputs and FM-generated outputs.

Content filtering depends on the confidence classification of user inputs and FM responses across each of the four harmful categories. All input and output statements are classified into one of four confidence levels (NONE, LOW, MEDIUM, HIGH) for each harmful category. For example, if a statement is classified as Hate with HIGH confidence, the likelihood of the statement representing hateful content is high. A single statement can be classified across multiple categories with varying confidence levels. For example, a single statement can be classified as Hate with HIGH confidence, Insults with LOW confidence, Sexual with NONE confidence, and Violence with MEDIUM confidence.

For more information, see Guardrails content filters.

This data type is used in the following API operations:

" @@ -3406,6 +3770,14 @@ "outputStrength":{ "shape":"GuardrailFilterStrength", "documentation":"

The strength of the content filter to apply to model responses. As you increase the filter strength, the likelihood of filtering harmful content increases and the probability of seeing harmful content in your application reduces.

" + }, + "inputModalities":{ + "shape":"GuardrailModalities", + "documentation":"

The input modalities selected for the guardrail content filter configuration.

" + }, + "outputModalities":{ + "shape":"GuardrailModalities", + "documentation":"

The output modalities selected for the guardrail content filter configuration.

" } }, "documentation":"

Contains filter strengths for harmful content. Guardrails support the following content filters to detect and filter harmful user inputs and FM-generated outputs.

Content filtering depends on the confidence classification of user inputs and FM responses across each of the four harmful categories. All input and output statements are classified into one of four confidence levels (NONE, LOW, MEDIUM, HIGH) for each harmful category. For example, if a statement is classified as Hate with HIGH confidence, the likelihood of the statement representing hateful content is high. A single statement can be classified across multiple categories with varying confidence levels. For example, a single statement can be classified as Hate with HIGH confidence, Insults with LOW confidence, Sexual with NONE confidence, and Violence with MEDIUM confidence.

For more information, see Guardrails content filters.
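For the new inputModalities and outputModalities members, a hedged sketch of how a content filter entry might be supplied in CreateGuardrail's contentPolicyConfig; the guardrail name and messaging strings are placeholders.

```python
import boto3

bedrock = boto3.client("bedrock")

# Hypothetical sketch: a guardrail content filter that also inspects images,
# using the inputModalities/outputModalities members added in this model update.
response = bedrock.create_guardrail(
    name="example-image-guardrail",
    blockedInputMessaging="Sorry, this request was blocked.",
    blockedOutputsMessaging="Sorry, this response was blocked.",
    contentPolicyConfig={
        "filtersConfig": [
            {
                "type": "VIOLENCE",
                "inputStrength": "HIGH",
                "outputStrength": "HIGH",
                "inputModalities": ["TEXT", "IMAGE"],
                "outputModalities": ["TEXT", "IMAGE"],
            }
        ]
    },
)
print(response["guardrailId"], response["version"])
```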

" @@ -3618,6 +3990,20 @@ "type":"string", "enum":["PROFANITY"] }, + "GuardrailModalities":{ + "type":"list", + "member":{"shape":"GuardrailModality"}, + "max":2, + "min":1 + }, + "GuardrailModality":{ + "type":"string", + "enum":[ + "TEXT", + "IMAGE" + ], + "sensitive":true + }, "GuardrailName":{ "type":"string", "max":50, @@ -3648,7 +4034,7 @@ "members":{ "type":{ "shape":"GuardrailPiiEntityType", - "documentation":"

The type of PII entity. For exampvle, Social Security Number.

" + "documentation":"

The type of PII entity. For example, Social Security Number.

" }, "action":{ "shape":"GuardrailSensitiveInformationAction", @@ -3666,7 +4052,7 @@ "members":{ "type":{ "shape":"GuardrailPiiEntityType", - "documentation":"

Configure guardrail type when the PII entity is detected.

The following PIIs are used to block or mask sensitive information:

" + "documentation":"

Configure guardrail type when the PII entity is detected.

The following PIIs are used to block or mask sensitive information:

" }, "action":{ "shape":"GuardrailSensitiveInformationAction", @@ -4382,6 +4768,16 @@ "type":"list", "member":{"shape":"InferenceType"} }, + "InstanceCount":{ + "type":"integer", + "box":true, + "min":1 + }, + "InstanceType":{ + "type":"string", + "max":50, + "min":1 + }, "InstructSupported":{ "type":"boolean", "box":true @@ -4870,6 +5266,42 @@ } } }, + "ListMarketplaceModelEndpointsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single call. If more results are available, the operation returns a NextToken value.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. You receive this token from a previous ListMarketplaceModelEndpoints call.

", + "location":"querystring", + "locationName":"nextToken" + }, + "modelSourceEquals":{ + "shape":"ModelSourceIdentifier", + "documentation":"

If specified, only endpoints for the given model source identifier are returned.

", + "location":"querystring", + "locationName":"modelSourceIdentifier" + } + } + }, + "ListMarketplaceModelEndpointsResponse":{ + "type":"structure", + "members":{ + "marketplaceModelEndpoints":{ + "shape":"MarketplaceModelEndpointSummaries", + "documentation":"

An array of endpoint summaries.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of results. Use this token to get the next set of results.
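A hedged sketch of paging through the endpoints with maxResults/nextToken as described above; manual pagination is shown rather than a paginator, since the paginator configuration is defined elsewhere in this patch.

```python
import boto3

# Hypothetical sketch: page through all registered Marketplace endpoints.
bedrock = boto3.client("bedrock")

next_token = None
while True:
    kwargs = {"maxResults": 50}
    if next_token:
        kwargs["nextToken"] = next_token
    page = bedrock.list_marketplace_model_endpoints(**kwargs)
    for summary in page.get("marketplaceModelEndpoints", []):
        print(summary["endpointArn"], summary.get("status"))
    next_token = page.get("nextToken")
    if not next_token:
        break
```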

" + } + } + }, "ListModelCopyJobsRequest":{ "type":"structure", "members":{ @@ -5146,6 +5578,36 @@ } } }, + "ListPromptRoutersRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of prompt routers to return in one page of results.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListPromptRoutersResponse":{ + "type":"structure", + "members":{ + "promptRouterSummaries":{ + "shape":"PromptRouterSummaries", + "documentation":"

A list of prompt router summaries.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.
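A minimal hedged sketch of listing prompt routers from the generated client; the Region is a placeholder.

```python
import boto3

# Hypothetical sketch: list the prompt routers visible in this Region.
bedrock = boto3.client("bedrock", region_name="us-east-1")

response = bedrock.list_prompt_routers(maxResults=10)
for summary in response.get("promptRouterSummaries", []):
    print(summary["promptRouterArn"], summary["type"], summary["status"])
```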

" + } + } + }, "ListProvisionedModelThroughputsRequest":{ "type":"structure", "members":{ @@ -5272,6 +5734,98 @@ }, "documentation":"

Configuration fields for invocation logging.

" }, + "MarketplaceModelEndpoint":{ + "type":"structure", + "required":[ + "endpointArn", + "modelSourceIdentifier", + "createdAt", + "updatedAt", + "endpointConfig", + "endpointStatus" + ], + "members":{ + "endpointArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint.

" + }, + "modelSourceIdentifier":{ + "shape":"ModelSourceIdentifier", + "documentation":"

The ARN of the model from Amazon Bedrock Marketplace that is deployed on this endpoint.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The overall status of the endpoint in Amazon Bedrock Marketplace (for example, REGISTERED or INCOMPATIBLE_ENDPOINT).

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

Additional information about the overall status, if available.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the endpoint was registered.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the endpoint was last updated.

" + }, + "endpointConfig":{ + "shape":"EndpointConfig", + "documentation":"

The configuration of the endpoint, including the number and type of instances used.

" + }, + "endpointStatus":{ + "shape":"String", + "documentation":"

The current status of the endpoint (e.g., Creating, InService, Updating, Failed).

" + }, + "endpointStatusMessage":{ + "shape":"String", + "documentation":"

Additional information about the endpoint status, if available.

" + } + }, + "documentation":"

Contains details about an endpoint for a model from Amazon Bedrock Marketplace.

" + }, + "MarketplaceModelEndpointSummaries":{ + "type":"list", + "member":{"shape":"MarketplaceModelEndpointSummary"}, + "max":1000, + "min":0 + }, + "MarketplaceModelEndpointSummary":{ + "type":"structure", + "required":[ + "endpointArn", + "modelSourceIdentifier", + "createdAt", + "updatedAt" + ], + "members":{ + "endpointArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint.

" + }, + "modelSourceIdentifier":{ + "shape":"ModelSourceIdentifier", + "documentation":"

The ARN of the model from Amazon Bedrock Marketplace that is deployed on this endpoint.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The overall status of the endpoint in Amazon Bedrock Marketplace.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

Additional information about the overall status, if available.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the endpoint was created.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the endpoint was last updated.

" + } + }, + "documentation":"

Provides a summary of an endpoint for a model from Amazon Bedrock Marketplace.

" + }, "MaxResults":{ "type":"integer", "box":true, @@ -5766,6 +6320,12 @@ "min":1, "pattern":"([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63})" }, + "ModelSourceIdentifier":{ + "type":"string", + "max":2048, + "min":0, + "pattern":".*arn:aws:sagemaker:.*:hub-content/SageMakerPublicHub/Model/.*" + }, "NonBlankString":{ "type":"string", "pattern":"[\\s\\S]*" @@ -5803,6 +6363,115 @@ "box":true, "min":1 }, + "PromptRouterArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:default-prompt-router/[a-zA-Z0-9-:.]+" + }, + "PromptRouterDescription":{ + "type":"string", + "max":200, + "min":1, + "pattern":"([0-9a-zA-Z:.][ _-]?)+", + "sensitive":true + }, + "PromptRouterName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"([0-9a-zA-Z][ _-]?)+" + }, + "PromptRouterStatus":{ + "type":"string", + "enum":["AVAILABLE"] + }, + "PromptRouterSummaries":{ + "type":"list", + "member":{"shape":"PromptRouterSummary"} + }, + "PromptRouterSummary":{ + "type":"structure", + "required":[ + "promptRouterName", + "routingCriteria", + "promptRouterArn", + "models", + "fallbackModel", + "status", + "type" + ], + "members":{ + "promptRouterName":{ + "shape":"PromptRouterName", + "documentation":"

The router's name.

" + }, + "routingCriteria":{ + "shape":"RoutingCriteria", + "documentation":"

The router's routing criteria.

" + }, + "description":{ + "shape":"PromptRouterDescription", + "documentation":"

The router's description.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

When the router was created.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

When the router was updated.

" + }, + "promptRouterArn":{ + "shape":"PromptRouterArn", + "documentation":"

The router's ARN.

" + }, + "models":{ + "shape":"PromptRouterTargetModels", + "documentation":"

The router's models.

" + }, + "fallbackModel":{ + "shape":"PromptRouterTargetModel", + "documentation":"

The router's fallback model.

" + }, + "status":{ + "shape":"PromptRouterStatus", + "documentation":"

The router's status.

" + }, + "type":{ + "shape":"PromptRouterType", + "documentation":"

The summary's type.

" + } + }, + "documentation":"

Details about a prompt router.

" + }, + "PromptRouterTargetModel":{ + "type":"structure", + "members":{ + "modelArn":{ + "shape":"PromptRouterTargetModelArn", + "documentation":"

The target model's ARN.

" + } + }, + "documentation":"

The target model for a prompt router.

" + }, + "PromptRouterTargetModelArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".*(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2})|(^arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{0,20}):(|[0-9]{12}):inference-profile/[a-zA-Z0-9-:.]+)" + }, + "PromptRouterTargetModels":{ + "type":"list", + "member":{"shape":"PromptRouterTargetModel"} + }, + "PromptRouterType":{ + "type":"string", + "enum":[ + "custom", + "default" + ] + }, "PromptTemplate":{ "type":"structure", "members":{ @@ -5974,6 +6643,35 @@ "max":1, "min":1 }, + "RegisterMarketplaceModelEndpointRequest":{ + "type":"structure", + "required":[ + "endpointIdentifier", + "modelSourceIdentifier" + ], + "members":{ + "endpointIdentifier":{ + "shape":"Arn", + "documentation":"

The ARN of the Amazon SageMaker endpoint you want to register with Amazon Bedrock Marketplace.

", + "location":"uri", + "locationName":"endpointIdentifier" + }, + "modelSourceIdentifier":{ + "shape":"ModelSourceIdentifier", + "documentation":"

The ARN of the model from Amazon Bedrock Marketplace that is deployed on the endpoint.

" + } + } + }, + "RegisterMarketplaceModelEndpointResponse":{ + "type":"structure", + "required":["marketplaceModelEndpoint"], + "members":{ + "marketplaceModelEndpoint":{ + "shape":"MarketplaceModelEndpoint", + "documentation":"

Details about the registered endpoint.

" + } + } + }, "RequestMetadataBaseFilters":{ "type":"structure", "members":{ @@ -6165,6 +6863,23 @@ "min":0, "pattern":"arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+" }, + "RoutingCriteria":{ + "type":"structure", + "required":["responseQualityDifference"], + "members":{ + "responseQualityDifference":{ + "shape":"RoutingCriteriaResponseQualityDifferenceDouble", + "documentation":"

The criteria's response quality difference.

" + } + }, + "documentation":"

Routing criteria for a prompt router.

" + }, + "RoutingCriteriaResponseQualityDifferenceDouble":{ + "type":"double", + "box":true, + "max":1, + "min":0 + }, "S3Config":{ "type":"structure", "required":["bucketName"], @@ -6212,6 +6927,37 @@ "min":1, "pattern":"s3://[a-z0-9][-.a-z0-9]{1,61}(?:/[-!_*'().a-z0-9A-Z]+(?:/[-!_*'().a-z0-9A-Z]+)*)?/?" }, + "SageMakerEndpoint":{ + "type":"structure", + "required":[ + "initialInstanceCount", + "instanceType", + "executionRole" + ], + "members":{ + "initialInstanceCount":{ + "shape":"InstanceCount", + "documentation":"

The number of Amazon EC2 compute instances to deploy for initial endpoint creation.

" + }, + "instanceType":{ + "shape":"InstanceType", + "documentation":"

The Amazon EC2 compute instance type to deploy for hosting the model.

" + }, + "executionRole":{ + "shape":"RoleArn", + "documentation":"

The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and the Docker image for deployment on Amazon EC2 compute instances or for batch transform jobs.

" + }, + "kmsEncryptionKey":{ + "shape":"KmsKeyId", + "documentation":"

The Amazon Web Services KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the Amazon EC2 compute instance that hosts the endpoint.

" + }, + "vpc":{ + "shape":"VpcConfig", + "documentation":"

The VPC configuration for the endpoint.

" + } + }, + "documentation":"

Specifies the configuration for an Amazon SageMaker endpoint.
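As a small illustration of the EndpointConfig union and its sageMaker member described above, a hedged Python dict in the shape that CreateMarketplaceModelEndpoint and UpdateMarketplaceModelEndpoint expect; the role ARN, KMS key, subnet, and security group IDs are placeholders.

```python
# Hypothetical sketch of an EndpointConfig value; all identifiers are placeholders.
endpoint_config = {
    "sageMaker": {
        "initialInstanceCount": 1,             # InstanceCount: at least 1 instance
        "instanceType": "ml.g5.2xlarge",       # InstanceType: the EC2 instance type to host the model
        "executionRole": "arn:aws:iam::111122223333:role/ExampleBedrockMarketplaceRole",
        "kmsEncryptionKey": (
            "arn:aws:kms:us-east-1:111122223333:"
            "key/1234abcd-12ab-34cd-56ef-1234567890ab"
        ),
        "vpc": {
            "subnetIds": ["subnet-0123456789abcdef0"],
            "securityGroupIds": ["sg-0123456789abcdef0"],
        },
    }
}
```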

" + }, "SageMakerFlowDefinitionArn":{ "type":"string", "max":1024, @@ -6249,6 +6995,16 @@ }, "exception":true }, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

Returned if the service cannot complete the request.

", + "error":{"httpStatusCode":503}, + "exception":true, + "fault":true + }, "SortByProvisionedModels":{ "type":"string", "enum":["CreationTime"] @@ -6268,6 +7024,13 @@ "Descending" ] }, + "Status":{ + "type":"string", + "enum":[ + "REGISTERED", + "INCOMPATIBLE_ENDPOINT" + ] + }, "StopEvaluationJobRequest":{ "type":"structure", "required":["jobIdentifier"], @@ -6623,6 +7386,40 @@ } } }, + "UpdateMarketplaceModelEndpointRequest":{ + "type":"structure", + "required":[ + "endpointArn", + "endpointConfig" + ], + "members":{ + "endpointArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the endpoint you want to update.

", + "location":"uri", + "locationName":"endpointArn" + }, + "endpointConfig":{ + "shape":"EndpointConfig", + "documentation":"

The new configuration for the endpoint, including the number and type of instances to use.

" + }, + "clientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. This token is listed as not required because Amazon Web Services SDKs automatically generate it for you and set this parameter. If you're not using the Amazon Web Services SDK or the CLI, you must provide this token or the action will fail.

", + "idempotencyToken":true + } + } + }, + "UpdateMarketplaceModelEndpointResponse":{ + "type":"structure", + "required":["marketplaceModelEndpoint"], + "members":{ + "marketplaceModelEndpoint":{ + "shape":"MarketplaceModelEndpoint", + "documentation":"

Details about the updated endpoint.

" + } + } + }, "UpdateProvisionedModelThroughputRequest":{ "type":"structure", "required":["provisionedModelId"], diff --git a/botocore/data/kendra/2019-02-03/service-2.json b/botocore/data/kendra/2019-02-03/service-2.json index 9286175297..581915d23a 100644 --- a/botocore/data/kendra/2019-02-03/service-2.json +++ b/botocore/data/kendra/2019-02-03/service-2.json @@ -5,13 +5,15 @@ "endpointPrefix":"kendra", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"kendra", "serviceFullName":"AWSKendraFrontendService", "serviceId":"kendra", "signatureVersion":"v4", "signingName":"kendra", "targetPrefix":"AWSKendraFrontendService", - "uid":"kendra-2019-02-03" + "uid":"kendra-2019-02-03", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateEntitiesToExperience":{ @@ -156,7 +158,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates an access configuration for your documents. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

You can use this to re-configure your existing document level access control without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. One of these users leaves the company or switches to a team that should be blocked from accessing top-secret documents. The user still has access to top-secret documents because the user had access when your documents were previously indexed. You can create a specific access control configuration for the user with deny access. You can later update the access control configuration to allow access if the user returns to the company and re-joins the 'top-secret' team. You can re-configure access control for your documents as circumstances change.

To apply your access control configuration to certain documents, you call the BatchPutDocument API with the AccessControlConfigurationId included in the Document object. If you use an S3 bucket as a data source, you update the .metadata.json with the AccessControlConfigurationId and synchronize your data source. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument API.

" + "documentation":"

Creates an access configuration for your documents. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

You can use this to re-configure your existing document level access control without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. One of these users leaves the company or switches to a team that should be blocked from accessing top-secret documents. The user still has access to top-secret documents because the user had access when your documents were previously indexed. You can create a specific access control configuration for the user with deny access. You can later update the access control configuration to allow access if the user returns to the company and re-joins the 'top-secret' team. You can re-configure access control for your documents as circumstances change.

To apply your access control configuration to certain documents, you call the BatchPutDocument API with the AccessControlConfigurationId included in the Document object. If you use an S3 bucket as a data source, you update the .metadata.json with the AccessControlConfigurationId and synchronize your data source. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument API.

You can't configure access control using CreateAccessControlConfiguration for an Amazon Kendra Gen AI Enterprise Edition index. Amazon Kendra will return a ValidationException error for a Gen_AI_ENTERPRISE_EDITION index.

" }, "CreateDataSource":{ "name":"CreateDataSource", @@ -360,7 +362,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Removes an FAQ from an index.

" + "documentation":"

Removes a FAQ from an index.

" }, "DeleteIndex":{ "name":"DeleteIndex", @@ -394,7 +396,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes a group so that all users and sub groups that belong to the group can no longer access documents only available to that group.

For example, after deleting the group \"Summer Interns\", all interns who belonged to that group no longer see intern-only documents in their search results.

If you want to delete or replace users or sub groups of a group, you need to use the PutPrincipalMapping operation. For example, if a user in the group \"Engineering\" leaves the engineering team and another user takes their place, you provide an updated list of users or sub groups that belong to the \"Engineering\" group when calling PutPrincipalMapping. You can update your internal list of users or sub groups and input this list when calling PutPrincipalMapping.

DeletePrincipalMapping is currently not supported in the Amazon Web Services GovCloud (US-West) region.

" + "documentation":"

Deletes a group so that all users that belong to the group can no longer access documents only available to that group.

For example, after deleting the group \"Summer Interns\", all interns who belonged to that group no longer see intern-only documents in their search results.

If you want to delete or replace users or sub groups of a group, you need to use the PutPrincipalMapping operation. For example, if a user in the group \"Engineering\" leaves the engineering team and another user takes their place, you provide an updated list of users or sub groups that belong to the \"Engineering\" group when calling PutPrincipalMapping. You can update your internal list of users or sub groups and input this list when calling PutPrincipalMapping.

DeletePrincipalMapping is currently not supported in the Amazon Web Services GovCloud (US-West) region.

" }, "DeleteQuerySuggestionsBlockList":{ "name":"DeleteQuerySuggestionsBlockList", @@ -496,7 +498,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets information about an FAQ list.

" + "documentation":"

Gets information about a FAQ.

" }, "DescribeFeaturedResultsSet":{ "name":"DescribeFeaturedResultsSet", @@ -787,7 +789,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets a list of FAQ lists associated with an index.

" + "documentation":"

Gets a list of FAQs associated with an index.

" }, "ListFeaturedResultsSets":{ "name":"ListFeaturedResultsSets", @@ -872,7 +874,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets a list of tags associated with a specified resource. Indexes, FAQs, and data sources can have tags associated with them.

" + "documentation":"

Gets a list of tags associated with a resource. Indexes, FAQs, data sources, and other resources can have tags associated with them.

" }, "ListThesauri":{ "name":"ListThesauri", @@ -926,7 +928,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Searches an index given an input query.

If you are working with large language models (LLMs) or implementing retrieval augmented generation (RAG) systems, you can use Amazon Kendra's Retrieve API, which can return longer semantically relevant passages. We recommend using the Retrieve API instead of filing a service limit increase to increase the Query API document excerpt length.

You can configure boosting or relevance tuning at the query level to override boosting at the index level, filter based on document fields/attributes and faceted search, and filter based on the user or their group access to documents. You can also include certain fields in the response that might provide useful additional information.

A query response contains three types of results.

You can specify that the query return only one type of result using the QueryResultTypeFilter parameter. Each query returns the 100 most relevant results. If you filter result type to only question-answers, a maximum of four results are returned. If you filter result type to only answers, a maximum of three results are returned.

" + "documentation":"

Searches an index given an input query.

If you are working with large language models (LLMs) or implementing retrieval augmented generation (RAG) systems, you can use Amazon Kendra's Retrieve API, which can return longer semantically relevant passages. We recommend using the Retrieve API instead of filing a service limit increase to increase the Query API document excerpt length.

You can configure boosting or relevance tuning at the query level to override boosting at the index level, filter based on document fields/attributes and faceted search, and filter based on the user or their group access to documents. You can also include certain fields in the response that might provide useful additional information.

A query response contains three types of results.

You can specify that the query return only one type of result using the QueryResultTypeFilter parameter. Each query returns the 100 most relevant results. If you filter result type to only question-answers, a maximum of four results are returned. If you filter result type to only answers, a maximum of three results are returned.

If you're using an Amazon Kendra Gen AI Enterprise Edition index, you can only use ATTRIBUTE_FILTER to filter search results by user context. If you're using an Amazon Kendra Gen AI Enterprise Edition index and you try to use USER_TOKEN to configure user context policy, Amazon Kendra returns a ValidationException error.
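To make the ATTRIBUTE_FILTER restriction concrete, a hedged boto3 sketch that filters query results by user context with the _user_id and _group_ids attributes instead of a user token; the index ID, user, and group values are placeholders.

```python
import boto3

kendra = boto3.client("kendra")

# Hypothetical sketch: filter results by user context on a Gen AI Enterprise
# Edition index using attribute filters rather than USER_TOKEN.
response = kendra.query(
    IndexId="11111111-2222-3333-4444-555555555555",  # placeholder index ID
    QueryText="vacation policy",
    AttributeFilter={
        "OrAllFilters": [
            {"EqualsTo": {"Key": "_user_id", "Value": {"StringValue": "user@example.com"}}},
            {"ContainsAny": {"Key": "_group_ids", "Value": {"StringListValue": ["HR", "Engineering"]}}},
        ]
    },
)
for item in response["ResultItems"]:
    print(item["Type"], item.get("DocumentTitle", {}).get("Text"))
```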

" }, "Retrieve":{ "name":"Retrieve", @@ -945,7 +947,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves relevant passages or text excerpts given an input query.

This API is similar to the Query API. However, by default, the Query API only returns excerpt passages of up to 100 token words. With the Retrieve API, you can retrieve longer passages of up to 200 token words and up to 100 semantically relevant passages. This doesn't include question-answer or FAQ type responses from your index. The passages are text excerpts that can be semantically extracted from multiple documents and multiple parts of the same document. If in extreme cases your documents produce zero passages using the Retrieve API, you can alternatively use the Query API and its types of responses.

You can also do the following:

You can also include certain fields in the response that might provide useful additional information.

The Retrieve API shares the number of query capacity units that you set for your index. For more information on what's included in a single capacity unit and the default base capacity for an index, see Adjusting capacity.

" + "documentation":"

Retrieves relevant passages or text excerpts given an input query.

This API is similar to the Query API. However, by default, the Query API only returns excerpt passages of up to 100 token words. With the Retrieve API, you can retrieve longer passages of up to 200 token words and up to 100 semantically relevant passages. This doesn't include question-answer or FAQ type responses from your index. The passages are text excerpts that can be semantically extracted from multiple documents and multiple parts of the same document. If in extreme cases your documents produce zero passages using the Retrieve API, you can alternatively use the Query API and its types of responses.

You can also do the following:

You can also include certain fields in the response that might provide useful additional information.

The Retrieve API shares the number of query capacity units that you set for your index. For more information on what's included in a single capacity unit and the default base capacity for an index, see Adjusting capacity.

If you're using an Amazon Kendra Gen AI Enterprise Edition index, you can only use ATTRIBUTE_FILTER to filter search results by user context. If you're using an Amazon Kendra Gen AI Enterprise Edition index and you try to use USER_TOKEN to configure user context policy, Amazon Kendra returns a ValidationException error.
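A minimal hedged sketch of the Retrieve API for a RAG-style workflow; the index ID is a placeholder.

```python
import boto3

# Hypothetical sketch: retrieve longer passages for retrieval augmented generation.
kendra = boto3.client("kendra")

response = kendra.retrieve(
    IndexId="11111111-2222-3333-4444-555555555555",  # placeholder index ID
    QueryText="How do I rotate access keys?",
    PageSize=10,
)
for passage in response["ResultItems"]:
    # Each result item carries the source document title and a text excerpt.
    print(passage["DocumentTitle"], passage["Content"][:200])
```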

" }, "StartDataSourceSyncJob":{ "name":"StartDataSourceSyncJob", @@ -1014,7 +1016,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Adds the specified tag to the specified index, FAQ, or data source resource. If the tag already exists, the existing value is replaced with the new value.

" + "documentation":"

Adds the specified tag to the specified index, FAQ, data source, or other resource. If the tag already exists, the existing value is replaced with the new value.

" }, "UntagResource":{ "name":"UntagResource", @@ -1031,7 +1033,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Removes a tag from an index, FAQ, or a data source.

" + "documentation":"

Removes a tag from an index, FAQ, data source, or other resource.

" }, "UpdateAccessControlConfiguration":{ "name":"UpdateAccessControlConfiguration", @@ -1050,7 +1052,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates an access control configuration for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

You can update an access control configuration you created without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. You created an 'allow' access control configuration for one user who recently joined the 'top-secret' team, switching from a team with 'deny' access to top-secret documents. However, the user suddenly returns to their previous team and should no longer have access to top secret documents. You can update the access control configuration to re-configure access control for your documents as circumstances change.

You call the BatchPutDocument API to apply the updated access control configuration, with the AccessControlConfigurationId included in the Document object. If you use an S3 bucket as a data source, you synchronize your data source to apply the AccessControlConfigurationId in the .metadata.json file. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument API.

" + "documentation":"

Updates an access control configuration for your documents in an index. This includes user and group access information for your documents. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

You can update an access control configuration you created without indexing all of your documents again. For example, your index contains top-secret company documents that only certain employees or users should access. You created an 'allow' access control configuration for one user who recently joined the 'top-secret' team, switching from a team with 'deny' access to top-secret documents. However, the user suddenly returns to their previous team and should no longer have access to top secret documents. You can update the access control configuration to re-configure access control for your documents as circumstances change.

You call the BatchPutDocument API to apply the updated access control configuration, with the AccessControlConfigurationId included in the Document object. If you use an S3 bucket as a data source, you synchronize your data source to apply the AccessControlConfigurationId in the .metadata.json file. Amazon Kendra currently only supports access control configuration for S3 data sources and documents indexed using the BatchPutDocument API.

You can't configure access control using CreateAccessControlConfiguration for an Amazon Kendra Gen AI Enterprise Edition index. Amazon Kendra will return a ValidationException error for a Gen_AI_ENTERPRISE_EDITION index.

" }, "UpdateDataSource":{ "name":"UpdateDataSource", @@ -1467,7 +1469,7 @@ "documentation":"

Performs a less than or equals operation on document attributes/fields and their values. Use with the document attribute type Date or Long.

" } }, - "documentation":"

Filters the search results based on document attributes or fields.

You can filter results using attributes for your particular documents. The attributes must exist in your index. For example, if your documents include the custom attribute \"Department\", you can filter documents that belong to the \"HR\" department. You would use the EqualsTo operation to filter results or documents with \"Department\" equals to \"HR\".

You can use AndAllFilters and AndOrFilters in combination with each other or with other operations such as EqualsTo. For example:

AndAllFilters

This example filters results or documents that belong to the HR department and belong to projects that contain \"new hires\" or \"new hiring\" in the project name (must use ContainAny with StringListValue). This example is filtering with a depth of 2.

You cannot filter more than a depth of 2, otherwise you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\" Also, if you use more than 10 attribute filters in a given list for AndAllFilters or OrAllFilters, you receive a ValidationException with the message \"AttributeFilter cannot have a length of more than 10\".

For examples of using AttributeFilter, see Using document attributes to filter search results.

" + "documentation":"

Filters the search results based on document attributes or fields.

You can filter results using attributes for your particular documents. The attributes must exist in your index. For example, if your documents include the custom attribute \"Department\", you can filter documents that belong to the \"HR\" department. You would use the EqualsTo operation to filter results or documents with \"Department\" equals to \"HR\".

You can use AndAllFilters and OrAllFilters in combination with each other or with other operations such as EqualsTo. For example:

AndAllFilters

This example filters results or documents that belong to the HR department AND belong to projects that contain \"new hires\" OR \"new hiring\" in the project name (must use ContainsAny with StringListValue). This example is filtering with a depth of 2.

You cannot filter more than a depth of 2, otherwise you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\" Also, if you use more than 10 attribute filters in a given list for AndAllFilters or OrAllFilters, you receive a ValidationException with the message \"AttributeFilter cannot have a length of more than 10\".

For examples of using AttributeFilter, see Using document attributes to filter search results.
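The depth-2 example in the paragraph above, written out as the AttributeFilter value a Query call would take; the attribute names are illustrative and must exist in your index.

```python
# Hypothetical sketch of the depth-2 filter described above: documents in the HR
# department AND in projects whose name contains "new hires" OR "new hiring".
attribute_filter = {
    "AndAllFilters": [
        {"EqualsTo": {"Key": "Department", "Value": {"StringValue": "HR"}}},
        {
            "OrAllFilters": [
                {
                    "ContainsAny": {
                        "Key": "Project",
                        "Value": {"StringListValue": ["new hires", "new hiring"]},
                    }
                }
            ]
        },
    ]
}
```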

" }, "AttributeFilterList":{ "type":"list", @@ -1558,7 +1560,7 @@ }, "Credentials":{ "shape":"SecretArn", - "documentation":"

Your secret ARN, which you can create in Secrets Manager

You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.

" + "documentation":"

The Amazon Resource Name (ARN) of an Secrets Manager secret. You create a secret to store your credentials in Secrets Manager

You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.

" } }, "documentation":"

Provides the configuration information to connect to websites that require basic user authentication.

" @@ -1603,6 +1605,10 @@ "shape":"DocumentId", "documentation":"

The identifier of the document that couldn't be removed from the index.

" }, + "DataSourceId":{ + "shape":"DataSourceId", + "documentation":"

The identifier of the data source connector that the document belongs to.

" + }, "ErrorCode":{ "shape":"ErrorCode", "documentation":"

The error code for why the document couldn't be removed from the index.

" @@ -1709,6 +1715,10 @@ "shape":"DocumentId", "documentation":"

The identifier of the document whose status could not be retrieved.

" }, + "DataSourceId":{ + "shape":"DataSourceId", + "documentation":"

The identifier of the data source connector that the failed document belongs to.

" + }, "ErrorCode":{ "shape":"ErrorCode", "documentation":"

Indicates the source of the error.

" @@ -1765,6 +1775,10 @@ "shape":"DocumentId", "documentation":"

The identifier of the document.

" }, + "DataSourceId":{ + "shape":"DataSourceId", + "documentation":"

The identifier of the data source connector that the failed document belongs to.

" + }, "ErrorCode":{ "shape":"ErrorCode", "documentation":"

The type of error that caused the document to fail to be indexed.

" @@ -2360,7 +2374,7 @@ }, "SecretArn":{ "shape":"SecretArn", - "documentation":"

The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.

" + "documentation":"

The Amazon Resource Name (ARN) of an Secrets Manager secret that stores the credentials. The credentials should be a user-password pair. For more information, see Using a Database Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.

" } }, "documentation":"

Provides the configuration information that's required to connect to a database.

" @@ -2616,7 +2630,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of an IAM role with permission to access the S3 bucket that contains the FAQs. For more information, see IAM access roles for Amazon Kendra.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role with permission to access the S3 bucket that contains the FAQ file. For more information, see IAM access roles for Amazon Kendra.

" }, "Tags":{ "shape":"TagList", @@ -2709,7 +2723,7 @@ }, "Edition":{ "shape":"IndexEdition", - "documentation":"

The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for production. Once you set the edition for an index, it can't be changed.

The Edition parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION.

For more information on quota limits for Enterprise and Developer editions, see Quotas.

" + "documentation":"

The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for production. Use GEN_AI_ENTERPRISE_EDITION for creating generative AI applications. Once you set the edition for an index, it can't be changed.

The Edition parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION.

For more information on quota limits for Gen AI Enterprise Edition, Enterprise Edition, and Developer Edition indices, see Quotas.

" }, "RoleArn":{ "shape":"RoleArn", @@ -2734,15 +2748,15 @@ }, "UserTokenConfigurations":{ "shape":"UserTokenConfigurationList", - "documentation":"

The user token configuration.

" + "documentation":"

The user token configuration.

If you're using an Amazon Kendra Gen AI Enterprise Edition index and you try to use UserTokenConfigurations to configure user context policy, Amazon Kendra returns a ValidationException error.

" }, "UserContextPolicy":{ "shape":"UserContextPolicy", - "documentation":"

The user context policy.

ATTRIBUTE_FILTER

All indexed content is searchable and displayable for all users. If you want to filter search results on user context, you can use the attribute filters of _user_id and _group_ids or you can provide user and group information in UserContext.

USER_TOKEN

Enables token-based user access control to filter search results on user context. All documents with no access control and all documents accessible to the user will be searchable and displayable.

" + "documentation":"

The user context policy.

If you're using an Amazon Kendra Gen AI Enterprise Edition index, you can only use ATTRIBUTE_FILTER to filter search results by user context. If you're using an Amazon Kendra Gen AI Enterprise Edition index and you try to use USER_TOKEN to configure user context policy, Amazon Kendra returns a ValidationException error.

ATTRIBUTE_FILTER

All indexed content is searchable and displayable for all users. If you want to filter search results on user context, you can use the attribute filters of _user_id and _group_ids or you can provide user and group information in UserContext.

USER_TOKEN

Enables token-based user access control to filter search results on user context. All documents with no access control and all documents accessible to the user will be searchable and displayable.

" }, "UserGroupResolutionConfiguration":{ "shape":"UserGroupResolutionConfiguration", - "documentation":"

Gets users and groups from IAM Identity Center identity source. To configure this, see UserGroupResolutionConfiguration. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

" + "documentation":"

Gets users and groups from IAM Identity Center identity source. To configure this, see UserGroupResolutionConfiguration. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

If you're using an Amazon Kendra Gen AI Enterprise Edition index, UserGroupResolutionConfiguration isn't supported.
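Tying the CreateIndex parameters above together, a hedged sketch of creating a Gen AI Enterprise Edition index with the attribute-filter user context policy; the index name and role ARN are placeholders.

```python
import boto3

kendra = boto3.client("kendra")

# Hypothetical sketch: create a Gen AI Enterprise Edition index that filters
# search results by user context with attribute filters (USER_TOKEN is not supported).
response = kendra.create_index(
    Name="example-genai-index",
    Edition="GEN_AI_ENTERPRISE_EDITION",
    RoleArn="arn:aws:iam::111122223333:role/ExampleKendraIndexRole",
    UserContextPolicy="ATTRIBUTE_FILTER",
)
print(response["Id"])
```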

" } } }, @@ -2870,7 +2884,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of a role with permission to run PreExtractionHookConfiguration and PostExtractionHookConfiguration for altering document metadata and content during the document ingestion process. For more information, see IAM roles for Amazon Kendra.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role with permission to run PreExtractionHookConfiguration and PostExtractionHookConfiguration for altering document metadata and content during the document ingestion process. For more information, see IAM roles for Amazon Kendra.

" } }, "documentation":"

Provides the configuration information for altering document metadata and content during the document ingestion process.

For more information, see Customizing document metadata during the ingestion process.

" @@ -3549,7 +3563,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the role with permission to access the data source and required resources.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role with permission to access the data source and required resources.

" }, "ErrorMessage":{ "shape":"ErrorMessage", @@ -3623,7 +3637,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

Shows the Amazon Resource Name (ARN) of a role with permission to access Query API, QuerySuggestions API, SubmitFeedback API, and IAM Identity Center that stores your user and group information.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role with permission to access the Query API, QuerySuggestions API, SubmitFeedback API, and IAM Identity Center that stores your user and group information.

" }, "ErrorMessage":{ "shape":"ErrorMessage", @@ -3682,7 +3696,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the role that provides access to the S3 bucket containing the input files for the FAQ.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides access to the S3 bucket containing the FAQ file.

" }, "ErrorMessage":{ "shape":"ErrorMessage", @@ -3690,7 +3704,7 @@ }, "FileFormat":{ "shape":"FaqFileFormat", - "documentation":"

The file format used by the input files for the FAQ.

" + "documentation":"

The file format used for the FAQ file.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -4852,10 +4866,10 @@ "members":{ "IndexedQuestionAnswersCount":{ "shape":"IndexedQuestionAnswersCount", - "documentation":"

The total number of FAQ questions and answers contained in the index.

" + "documentation":"

The total number of FAQ questions and answers for an index.

" } }, - "documentation":"

Provides statistical information about the FAQ questions and answers contained in an index.

" + "documentation":"

Provides statistical information about the FAQ questions and answers for an index.

" }, "FaqStatus":{ "type":"string", @@ -5447,7 +5461,7 @@ "members":{ "MemberGroups":{ "shape":"MemberGroups", - "documentation":"

A list of sub groups that belong to a group. For example, the sub groups \"Research\", \"Engineering\", and \"Sales and Marketing\" all belong to the group \"Company\".

" + "documentation":"

A list of users that belong to a group. This can also include sub groups. For example, the sub groups \"Research\", \"Engineering\", and \"Sales and Marketing\" all belong to the group \"Company A\".

" }, "MemberUsers":{ "shape":"MemberUsers", @@ -5458,7 +5472,7 @@ "documentation":"

If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000.

You can download this example S3 file that uses the correct format for listing group members. Note, dataSourceId is optional. The value of type for a group is always GROUP and for a user it is always USER.

" } }, - "documentation":"

A list of users or sub groups that belong to a group. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

" + "documentation":"

A list of users that belong to a group. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

" }, "GroupOrderingIdSummaries":{ "type":"list", @@ -5579,7 +5593,7 @@ }, "LambdaArn":{ "shape":"LambdaArn", - "documentation":"

The Amazon Resource Name (ARN) of a role with permission to run a Lambda function during ingestion. For more information, see IAM roles for Amazon Kendra.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role with permission to run a Lambda function during ingestion. For more information, see IAM roles for Amazon Kendra.

" }, "S3Bucket":{ "shape":"S3BucketName", @@ -5648,7 +5662,8 @@ "type":"string", "enum":[ "DEVELOPER_EDITION", - "ENTERPRISE_EDITION" + "ENTERPRISE_EDITION", + "GEN_AI_ENTERPRISE_EDITION" ] }, "IndexFieldName":{ @@ -6164,7 +6179,7 @@ "members":{ "IndexId":{ "shape":"IndexId", - "documentation":"

The index that contains the FAQ lists.

" + "documentation":"

The index for the FAQs.

" }, "NextToken":{ "shape":"NextToken", @@ -6185,7 +6200,7 @@ }, "FaqSummaryItems":{ "shape":"FaqSummaryItems", - "documentation":"

information about the FAQs associated with the specified index.

" + "documentation":"

Summary information about the FAQs for a specified index.

" } } }, @@ -6329,7 +6344,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, or data source to get a list of tags for.

" + "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, data source, or other resource to get a list of tags for. For example, the ARN of an index is constructed as follows: arn:aws:kendra:your-region:your-account-id:index/index-id For information on how to construct an ARN for all types of Amazon Kendra resources, see Resource types.

" } } }, @@ -6338,7 +6353,7 @@ "members":{ "Tags":{ "shape":"TagList", - "documentation":"

A list of tags associated with the index, FAQ, or data source.

" + "documentation":"

A list of tags associated with the index, FAQ, data source, or other resource.

" } } }, @@ -6625,7 +6640,7 @@ "members":{ "OneDriveUserList":{ "shape":"OneDriveUserList", - "documentation":"

A list of users whose documents should be indexed. Specify the user names in email format, for example, username@tenantdomain. If you need to index the documents of more than 100 users, use the OneDriveUserS3Path field to specify the location of a file containing a list of users.

" + "documentation":"

A list of users whose documents should be indexed. Specify the user names in email format, for example, username@tenantdomain. If you need to index the documents of more than 10 users, use the OneDriveUserS3Path field to specify the location of a file containing a list of users.

" }, "OneDriveUserS3Path":{ "shape":"S3Path", @@ -6775,7 +6790,7 @@ }, "Credentials":{ "shape":"SecretArn", - "documentation":"

Your secret ARN, which you can create in Secrets Manager

The credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently support basic authentication to connect to a web proxy server. The secret stores your credentials.

" + "documentation":"

The Amazon Resource Name (ARN) of a Secrets Manager secret. You create a secret to store your credentials in Secrets Manager.

The credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently supports basic authentication to connect to a web proxy server. The secret stores your credentials.

" } }, "documentation":"

Provides the configuration information for a web proxy to connect to website hosts.

" @@ -6806,15 +6821,15 @@ }, "GroupMembers":{ "shape":"GroupMembers", - "documentation":"

The list that contains your users or sub groups that belong the same group.

For example, the group \"Company\" includes the user \"CEO\" and the sub groups \"Research\", \"Engineering\", and \"Sales and Marketing\".

If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000.

" + "documentation":"

The list that contains your users that belong to the same group. This can include sub groups that belong to a group.

For example, the group \"Company A\" includes the user \"CEO\" and the sub groups \"Research\", \"Engineering\", and \"Sales and Marketing\".

If you have more than 1000 users and/or sub groups for a single group, you need to provide the path to the S3 file that lists your users and sub groups for a group. Your sub groups can contain more than 1000 users, but the list of sub groups that belong to a group (and/or users) must be no more than 1000.

" }, "OrderingId":{ "shape":"PrincipalOrderingId", - "documentation":"

The timestamp identifier you specify to ensure Amazon Kendra does not override the latest PUT action with previous actions. The highest number ID, which is the ordering ID, is the latest action you want to process and apply on top of other actions with lower number IDs. This prevents previous actions with lower number IDs from possibly overriding the latest action.

The ordering ID can be the Unix time of the last update you made to a group members list. You would then provide this list when calling PutPrincipalMapping. This ensures your PUT action for that updated group with the latest members list doesn't get overwritten by earlier PUT actions for the same group which are yet to be processed.

The default ordering ID is the current Unix time in milliseconds that the action was received by Amazon Kendra.

" + "documentation":"

The timestamp identifier you specify to ensure Amazon Kendra doesn't override the latest PUT action with previous actions. The highest number ID, which is the ordering ID, is the latest action you want to process and apply on top of other actions with lower number IDs. This prevents previous actions with lower number IDs from possibly overriding the latest action.

The ordering ID can be the Unix time of the last update you made to a group members list. You would then provide this list when calling PutPrincipalMapping. This ensures your PUT action for that updated group with the latest members list doesn't get overwritten by earlier PUT actions for the same group which are yet to be processed.

The default ordering ID is the current Unix time in milliseconds that the action was received by Amazon Kendra.

" }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of a role that has access to the S3 file that contains your list of users or sub groups that belong to a group.

For more information, see IAM roles for Amazon Kendra.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that has access to the S3 file that contains your list of users that belong to a group.

For more information, see IAM roles for Amazon Kendra.
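To make the ordering-ID semantics above concrete, a minimal boto3 sketch; the index ID, group name, and member identities are placeholders:

```python
import time

import boto3

kendra = boto3.client("kendra")

# Map two users and one sub group to the "Engineering" group. Using the
# current Unix time as OrderingId keeps an older, still-queued PUT for the
# same group from overwriting this update.
kendra.put_principal_mapping(
    IndexId="12345678-1234-1234-1234-123456789012",   # placeholder index ID
    GroupId="Engineering",
    GroupMembers={
        "MemberUsers": [
            {"UserId": "alice@example.com"},
            {"UserId": "bob@example.com"},
        ],
        "MemberGroups": [
            {"GroupId": "Engineering-Interns"},
        ],
    },
    OrderingId=int(time.time()),
)
```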

" } } }, @@ -6849,7 +6864,7 @@ }, "AttributeFilter":{ "shape":"AttributeFilter", - "documentation":"

Filters search results by document fields/attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters parameters contain a list of other filters.

The AttributeFilter parameter means you can create a set of filtering rules that a document must satisfy to be included in the query results.

" + "documentation":"

Filters search results by document fields/attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters parameters contain a list of other filters.

The AttributeFilter parameter means you can create a set of filtering rules that a document must satisfy to be included in the query results.

For Amazon Kendra Gen AI Enterprise Edition indices, use AttributeFilter to enable document filtering for end users using _email_id or to include public documents (_email_id=null).
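A hedged boto3 sketch of the _email_id filtering described above; the index ID and e-mail address are placeholders, and the single EqualsTo clause is just one plausible shape of such a filter:

```python
import boto3

kendra = boto3.client("kendra")

# On a Gen AI Enterprise Edition index, restrict results to documents the
# end user may see, keyed by the built-in _email_id attribute.
response = kendra.query(
    IndexId="12345678-1234-1234-1234-123456789012",   # placeholder index ID
    QueryText="quarterly security review",
    AttributeFilter={
        "EqualsTo": {
            "Key": "_email_id",
            "Value": {"StringValue": "alice@example.com"},
        }
    },
)
for item in response["ResultItems"]:
    print(item["DocumentId"])
```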

" }, "Facets":{ "shape":"FacetList", @@ -7271,7 +7286,7 @@ }, "AttributeFilter":{ "shape":"AttributeFilter", - "documentation":"

Filters search results by document fields/attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters parameters contain a list of other filters.

The AttributeFilter parameter means you can create a set of filtering rules that a document must satisfy to be included in the query results.

" + "documentation":"

Filters search results by document fields/attributes. You can only provide one attribute filter; however, the AndAllFilters, NotFilter, and OrAllFilters parameters contain a list of other filters.

The AttributeFilter parameter means you can create a set of filtering rules that a document must satisfy to be included in the query results.

For Amazon Kendra Gen AI Enterprise Edition indices, use AttributeFilter to enable document filtering for end users using _email_id or to include public documents (_email_id=null).

" }, "RequestedDocumentAttributes":{ "shape":"DocumentAttributeKeyList", @@ -8503,14 +8518,14 @@ "members":{ "Key":{ "shape":"TagKey", - "documentation":"

The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, or data source.

" + "documentation":"

The key for the tag. Keys are not case sensitive and must be unique for the index, FAQ, data source, or other resource.

" }, "Value":{ "shape":"TagValue", "documentation":"

The value associated with the tag. The value may be an empty string but it can't be null.

" } }, - "documentation":"

A list of key/value pairs that identify an index, FAQ, or data source. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @.

" + "documentation":"

A key-value pair that identifies or categorizes an index, FAQ, data source, or other resource. A tag key and value can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @.

" }, "TagKey":{ "type":"string", @@ -8538,11 +8553,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, or data source to tag.

" + "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, data source, or other resource to add a tag to. For example, the ARN of an index is constructed as follows: arn:aws:kendra:your-region:your-account-id:index/index-id. For information on how to construct an ARN for all types of Amazon Kendra resources, see Resource types.

" }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tag keys to add to the index, FAQ, or data source. If a tag already exists, the existing value is replaced with the new value.

" + "documentation":"

A list of tags to add to the index, FAQ, data source, or other resource. If a tag already exists, the existing value is replaced with the new value.
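A minimal boto3 sketch of the replace-on-conflict behavior noted above; the ARN and tag values are placeholders:

```python
import boto3

kendra = boto3.client("kendra")

index_arn = (
    "arn:aws:kendra:us-east-1:111122223333:index/"
    "12345678-1234-1234-1234-123456789012"
)

# If the key "team" already exists on the index, its value is replaced.
kendra.tag_resource(
    ResourceARN=index_arn,
    Tags=[
        {"Key": "team", "Value": "search-platform"},
        {"Key": "stage", "Value": "prod"},
    ],
)
```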

" } } }, @@ -8721,11 +8736,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, or data source to remove the tag from.

" + "documentation":"

The Amazon Resource Name (ARN) of the index, FAQ, data source, or other resource to remove a tag from. For example, the ARN of an index is constructed as follows: arn:aws:kendra:your-region:your-account-id:index/index-id. For information on how to construct an ARN for all types of Amazon Kendra resources, see Resource types.

" }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"

A list of tag keys to remove from the index, FAQ, or data source. If a tag key does not exist on the resource, it is ignored.

" + "documentation":"

A list of tag keys to remove from the index, FAQ, data source, or other resource. If a tag key doesn't exist for the resource, it is ignored.

" } } }, @@ -8809,7 +8824,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source and required resources. For more information, see IAM roles for Amazon Kendra.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role with permission to access the data source and required resources. For more information, see IAM roles for Amazon Kendra.

" }, "LanguageCode":{ "shape":"LanguageCode", @@ -8842,7 +8857,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access Query API, QuerySuggestions API, SubmitFeedback API, and IAM Identity Center that stores your user and group information. For more information, see IAM roles for Amazon Kendra.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role with permission to access the Query API, QuerySuggestions API, SubmitFeedback API, and IAM Identity Center that stores your user and group information. For more information, see IAM roles for Amazon Kendra.

" }, "Configuration":{ "shape":"ExperienceConfiguration", @@ -8930,15 +8945,15 @@ }, "UserTokenConfigurations":{ "shape":"UserTokenConfigurationList", - "documentation":"

The user token configuration.

" + "documentation":"

The user token configuration.

If you're using an Amazon Kendra Gen AI Enterprise Edition index and you try to use UserTokenConfigurations to configure user context policy, Amazon Kendra returns a ValidationException error.

" }, "UserContextPolicy":{ "shape":"UserContextPolicy", - "documentation":"

The user context policy.

" + "documentation":"

The user context policy.

If you're using an Amazon Kendra Gen AI Enterprise Edition index, you can only use ATTRIBUTE_FILTER to filter search results by user context. If you're using an Amazon Kendra Gen AI Enterprise Edition index and you try to use USER_TOKEN to configure user context policy, Amazon Kendra returns a ValidationException error.
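A hedged sketch of creating a Gen AI Enterprise Edition index with the only supported user context policy; the index name and role ARN are placeholders, and GEN_AI_ENTERPRISE_EDITION is the edition value added elsewhere in this release:

```python
import boto3

kendra = boto3.client("kendra")

response = kendra.create_index(
    Name="genai-docs-index",                                    # placeholder
    Edition="GEN_AI_ENTERPRISE_EDITION",
    RoleArn="arn:aws:iam::111122223333:role/KendraIndexRole",   # placeholder
    # USER_TOKEN (and UserTokenConfigurations) would be rejected with a
    # ValidationException on this edition, so only ATTRIBUTE_FILTER is set.
    UserContextPolicy="ATTRIBUTE_FILTER",
)
print(response["Id"])
```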

" }, "UserGroupResolutionConfiguration":{ "shape":"UserGroupResolutionConfiguration", - "documentation":"

Gets users and groups from IAM Identity Center identity source. To configure this, see UserGroupResolutionConfiguration. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

" + "documentation":"

Gets users and groups from IAM Identity Center identity source. To configure this, see UserGroupResolutionConfiguration. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents.

If you're using an Amazon Kendra Gen AI Enterprise Edition index, UserGroupResolutionConfiguration isn't supported.

" } } }, @@ -9085,7 +9100,7 @@ "documentation":"

The list of data source groups you want to filter search results based on groups' access to documents in that data source.

" } }, - "documentation":"

Provides information about the user context for an Amazon Kendra index.

User context filtering is a kind of personalized search with the benefit of controlling access to documents. For example, not all teams that search the company portal for information should access top-secret company documents, nor are these documents relevant to all users. Only specific users or groups of teams given access to top-secret documents should see these documents in their search results.

You provide one of the following:

If you provide both, an exception is thrown.

" + "documentation":"

Provides information about the user context for an Amazon Kendra index.

User context filtering is a kind of personalized search with the benefit of controlling access to documents. For example, not all teams that search the company portal for information should access top-secret company documents, nor are these documents relevant to all users. Only specific users or groups of teams given access to top-secret documents should see these documents in their search results.

You provide one of the following:

If you provide both, an exception is thrown.

If you're using an Amazon Kendra Gen AI Enterprise Edition index, you can use UserId, Groups, and DataSourceGroups to filter content. If you set the UserId to a particular user ID, it also includes all public documents.

Amazon Kendra Gen AI Enterprise Edition indices don't support token based document filtering. If you're using an Amazon Kendra Gen AI Enterprise Edition index, Amazon Kendra returns a ValidationException error if the Token field has a non-null value.
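A hedged boto3 sketch of the UserId/Groups/DataSourceGroups filtering described above; all identifiers are placeholders, and no Token is passed because a non-null Token is rejected on Gen AI Enterprise Edition indices:

```python
import boto3

kendra = boto3.client("kendra")

response = kendra.query(
    IndexId="12345678-1234-1234-1234-123456789012",   # placeholder index ID
    QueryText="onboarding checklist",
    UserContext={
        "UserId": "alice@example.com",        # also pulls in public documents
        "Groups": ["Engineering"],
        "DataSourceGroups": [
            {"GroupId": "Engineering", "DataSourceId": "datasource-id-placeholder"},
        ],
    },
)
```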

" }, "UserContextPolicy":{ "type":"string", @@ -9103,7 +9118,7 @@ "documentation":"

The identity store provider (mode) you want to use to get users and groups. IAM Identity Center is currently the only available mode. Your users and groups must exist in an IAM Identity Center identity source in order to use this mode.

" } }, - "documentation":"

Provides the configuration information to get users and groups from an IAM Identity Center identity source. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents. You can also use the PutPrincipalMapping API to map users to their groups so that you only need to provide the user ID when you issue the query.

To set up an IAM Identity Center identity source in the console to use with Amazon Kendra, see Getting started with an IAM Identity Center identity source. You must also grant the required permissions to use IAM Identity Center with Amazon Kendra. For more information, see IAM roles for IAM Identity Center.

Amazon Kendra currently does not support using UserGroupResolutionConfiguration with an Amazon Web Services organization member account for your IAM Identity Center identify source. You must create your index in the management account for the organization in order to use UserGroupResolutionConfiguration.

" + "documentation":"

Provides the configuration information to get users and groups from an IAM Identity Center identity source. This is useful for user context filtering, where search results are filtered based on the user or their group access to documents. You can also use the PutPrincipalMapping API to map users to their groups so that you only need to provide the user ID when you issue the query.

To set up an IAM Identity Center identity source in the console to use with Amazon Kendra, see Getting started with an IAM Identity Center identity source. You must also grant the required permissions to use IAM Identity Center with Amazon Kendra. For more information, see IAM roles for IAM Identity Center.

Amazon Kendra currently does not support using UserGroupResolutionConfiguration with an Amazon Web Services organization member account for your IAM Identity Center identity source. You must create your index in the management account for the organization in order to use UserGroupResolutionConfiguration.

If you're using an Amazon Kendra Gen AI Enterprise Edition index, UserGroupResolutionConfiguration isn't supported.

" }, "UserGroupResolutionMode":{ "type":"string", @@ -9146,7 +9161,7 @@ "documentation":"

Information about the JSON token type configuration.

" } }, - "documentation":"

Provides the configuration information for a token.

" + "documentation":"

Provides the configuration information for a token.

If you're using an Amazon Kendra Gen AI Enterprise Edition index and you try to use UserTokenConfigurations to configure user context policy, Amazon Kendra returns a ValidationException error.

" }, "UserTokenConfigurationList":{ "type":"list", diff --git a/botocore/data/sagemaker/2017-07-24/paginators-1.json b/botocore/data/sagemaker/2017-07-24/paginators-1.json index d12fd9c57d..03e461a8b3 100644 --- a/botocore/data/sagemaker/2017-07-24/paginators-1.json +++ b/botocore/data/sagemaker/2017-07-24/paginators-1.json @@ -443,6 +443,30 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "OptimizationJobSummaries" + }, + "ListClusterSchedulerConfigs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ClusterSchedulerConfigSummaries" + }, + "ListComputeQuotas": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ComputeQuotaSummaries" + }, + "ListPartnerApps": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Summaries" + }, + "ListTrainingPlans": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "TrainingPlanSummaries" } } } diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 10ddbecb8d..516e09250c 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -182,6 +182,20 @@ ], "documentation":"

Creates a SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for creating and managing persistent clusters for developing large machine learning models, such as large language models (LLMs) and diffusion models. To learn more, see Amazon SageMaker HyperPod in the Amazon SageMaker Developer Guide.

" }, + "CreateClusterSchedulerConfig":{ + "name":"CreateClusterSchedulerConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateClusterSchedulerConfigRequest"}, + "output":{"shape":"CreateClusterSchedulerConfigResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates a cluster policy configuration. This policy is used for task prioritization and fair-share allocation of idle compute, which helps prioritize critical workloads and distribute idle compute across entities.
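A hedged boto3 sketch of creating such a policy; the cluster ARN is a placeholder, and the PriorityClasses/FairShare fields of SchedulerConfig are assumptions about a shape that isn't reproduced in this diff:

```python
import boto3

sm = boto3.client("sagemaker")

response = sm.create_cluster_scheduler_config(
    Name="research-scheduler",
    ClusterArn="arn:aws:sagemaker:us-west-2:111122223333:cluster/abcdef123456",  # placeholder
    SchedulerConfig={
        # Assumed shape: named priority classes plus a fair-share toggle.
        "PriorityClasses": [
            {"Name": "inference", "Weight": 90},
            {"Name": "training", "Weight": 50},
        ],
        "FairShare": "Enabled",
    },
    Description="Prioritize inference over training; share idle compute.",
)
print(response["ClusterSchedulerConfigArn"], response["ClusterSchedulerConfigId"])
```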

" + }, "CreateCodeRepository":{ "name":"CreateCodeRepository", "http":{ @@ -206,6 +220,20 @@ ], "documentation":"

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

" }, + "CreateComputeQuota":{ + "name":"CreateComputeQuota", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateComputeQuotaRequest"}, + "output":{"shape":"CreateComputeQuotaResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates a compute allocation definition. This defines how compute is allocated, shared, and borrowed for the specified entities; specifically, how to lend and borrow idle compute and assign a fair-share weight to those entities.
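A hedged boto3 sketch; the cluster ARN and team name are placeholders, and the ResourceSharingConfig fields (Strategy, BorrowLimit) are assumptions, since that shape is not shown in this diff:

```python
import boto3

sm = boto3.client("sagemaker")

response = sm.create_compute_quota(
    Name="research-team-quota",
    ClusterArn="arn:aws:sagemaker:us-west-2:111122223333:cluster/abcdef123456",  # placeholder
    ComputeQuotaConfig={
        "ComputeQuotaResources": [
            {"InstanceType": "ml.p5.48xlarge", "Count": 4},
        ],
        # Assumed shape: lend idle capacity and cap borrowing at an assumed limit.
        "ResourceSharingConfig": {"Strategy": "LendAndBorrow", "BorrowLimit": 50},
        "PreemptTeamTasks": "LowerPriority",
    },
    ComputeQuotaTarget={"TeamName": "research-team", "FairShareWeight": 20},
    ActivationState="Enabled",
)
print(response["ComputeQuotaArn"], response["ComputeQuotaId"])
```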

" + }, "CreateContext":{ "name":"CreateContext", "http":{ @@ -683,6 +711,33 @@ ], "documentation":"

Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a source model, and you provide the settings for the optimization techniques that you want the job to apply. When the job completes successfully, SageMaker uploads the new optimized model to the output destination that you specify.

For more information about how to use this action, and about the supported optimization techniques, see Optimize model inference with Amazon SageMaker.

" }, + "CreatePartnerApp":{ + "name":"CreatePartnerApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePartnerAppRequest"}, + "output":{"shape":"CreatePartnerAppResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"}, + {"shape":"ConflictException"} + ], + "documentation":"

Creates an Amazon SageMaker Partner AI App.

" + }, + "CreatePartnerAppPresignedUrl":{ + "name":"CreatePartnerAppPresignedUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePartnerAppPresignedUrlRequest"}, + "output":{"shape":"CreatePartnerAppPresignedUrlResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Creates a presigned URL to access an Amazon SageMaker Partner AI App.

" + }, "CreatePipeline":{ "name":"CreatePipeline", "http":{ @@ -804,6 +859,21 @@ ], "documentation":"

Starts a model training job. After training completes, SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

If you choose to host your model using SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than SageMaker, provided that you know how to use them for inference.

In the request body, you provide the following:

For more information about SageMaker, see How It Works.

" }, + "CreateTrainingPlan":{ + "name":"CreateTrainingPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTrainingPlanRequest"}, + "output":{"shape":"CreateTrainingPlanResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"}, + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} + ], + "documentation":"

Creates a new training plan in SageMaker to reserve compute capacity.

Amazon SageMaker Training Plan is a capability within SageMaker that allows customers to reserve and manage GPU capacity for large-scale AI model training. It provides a way to secure predictable access to computational resources within specific timelines and budgets, without the need to manage underlying infrastructure.

How it works

Plans can be created for specific resources such as SageMaker Training Jobs or SageMaker HyperPod clusters, automatically provisioning resources, setting up infrastructure, executing workloads, and handling infrastructure failures.

Plan creation workflow

Plan composition

A plan can consist of one or more Reserved Capacities, each defined by a specific instance type, quantity, Availability Zone, duration, and start and end times. For more information about Reserved Capacity, see ReservedCapacitySummary.
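To make the plan-creation workflow above concrete, a hedged boto3 sketch: the offering-search parameters and values (instance type, count, duration, target resource) are assumptions about a request shape not shown in this diff, while CreateTrainingPlan itself uses only the two required members documented later in this file:

```python
import boto3

sm = boto3.client("sagemaker")

# 1. Search for offerings that match the capacity to reserve.
#    Parameter names and values here are illustrative assumptions.
offerings = sm.search_training_plan_offerings(
    InstanceType="ml.p5.48xlarge",
    InstanceCount=8,
    DurationHours=168,
    TargetResources=["hyperpod-cluster"],
)["TrainingPlanOfferings"]

# 2. Create the plan from the first matching offering.
plan = sm.create_training_plan(
    TrainingPlanName="llm-pretraining-week1",
    TrainingPlanOfferingId=offerings[0]["TrainingPlanOfferingId"],
)
print(plan["TrainingPlanArn"])
```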

" + }, "CreateTransformJob":{ "name":"CreateTransformJob", "http":{ @@ -974,6 +1044,18 @@ ], "documentation":"

Delete a SageMaker HyperPod cluster.

" }, + "DeleteClusterSchedulerConfig":{ + "name":"DeleteClusterSchedulerConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteClusterSchedulerConfigRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes the cluster policy of the cluster.

" + }, "DeleteCodeRepository":{ "name":"DeleteCodeRepository", "http":{ @@ -995,6 +1077,18 @@ ], "documentation":"

Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker. It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates, the compilation logs in CloudWatch, the compiled model, or the IAM role.

You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes STOPPED.

" }, + "DeleteComputeQuota":{ + "name":"DeleteComputeQuota", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteComputeQuotaRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes the compute allocation from the cluster.

" + }, "DeleteContext":{ "name":"DeleteContext", "http":{ @@ -1383,6 +1477,20 @@ ], "documentation":"

Deletes an optimization job.

" }, + "DeletePartnerApp":{ + "name":"DeletePartnerApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePartnerAppRequest"}, + "output":{"shape":"DeletePartnerAppResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes a SageMaker Partner AI App.

" + }, "DeletePipeline":{ "name":"DeletePipeline", "http":{ @@ -1630,6 +1738,19 @@ ], "documentation":"

Retrieves information about a node (also called an instance) of a SageMaker HyperPod cluster.

" }, + "DescribeClusterSchedulerConfig":{ + "name":"DescribeClusterSchedulerConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeClusterSchedulerConfigRequest"}, + "output":{"shape":"DescribeClusterSchedulerConfigResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes the cluster policy. This policy is used for task prioritization and fair-share allocation, which helps prioritize critical workloads and distribute idle compute across entities.

" + }, "DescribeCodeRepository":{ "name":"DescribeCodeRepository", "http":{ @@ -1653,6 +1774,19 @@ ], "documentation":"

Returns information about a model compilation job.

To create a model compilation job, use CreateCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

" }, + "DescribeComputeQuota":{ + "name":"DescribeComputeQuota", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeComputeQuotaRequest"}, + "output":{"shape":"DescribeComputeQuotaResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Describes the compute allocation definition.

" + }, "DescribeContext":{ "name":"DescribeContext", "http":{ @@ -2110,6 +2244,19 @@ ], "documentation":"

Provides the properties of the specified optimization job.

" }, + "DescribePartnerApp":{ + "name":"DescribePartnerApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePartnerAppRequest"}, + "output":{"shape":"DescribePartnerAppResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Gets information about a SageMaker Partner AI App.

" + }, "DescribePipeline":{ "name":"DescribePipeline", "http":{ @@ -2221,6 +2368,19 @@ ], "documentation":"

Returns information about a training job.

Some of the attributes below only appear if the training job successfully starts. If the training job fails, TrainingJobStatus is Failed and, depending on the FailureReason, attributes like TrainingStartTime, TrainingTimeInSeconds, TrainingEndTime, and BillableTimeInSeconds may not be present in the response.

" }, + "DescribeTrainingPlan":{ + "name":"DescribeTrainingPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTrainingPlanRequest"}, + "output":{"shape":"DescribeTrainingPlanResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Retrieves detailed information about a specific training plan.

" + }, "DescribeTransformJob":{ "name":"DescribeTransformJob", "http":{ @@ -2526,6 +2686,16 @@ ], "documentation":"

Retrieves the list of instances (also called nodes) in a SageMaker HyperPod cluster.

" }, + "ListClusterSchedulerConfigs":{ + "name":"ListClusterSchedulerConfigs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListClusterSchedulerConfigsRequest"}, + "output":{"shape":"ListClusterSchedulerConfigsResponse"}, + "documentation":"

Lists the cluster policy configurations.

" + }, "ListClusters":{ "name":"ListClusters", "http":{ @@ -2556,6 +2726,16 @@ "output":{"shape":"ListCompilationJobsResponse"}, "documentation":"

Lists model compilation jobs that satisfy various filters.

To create a model compilation job, use CreateCompilationJob. To get information about a particular model compilation job you have created, use DescribeCompilationJob.

" }, + "ListComputeQuotas":{ + "name":"ListComputeQuotas", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListComputeQuotasRequest"}, + "output":{"shape":"ListComputeQuotasResponse"}, + "documentation":"

Lists the resource allocation definitions.

" + }, "ListContexts":{ "name":"ListContexts", "http":{ @@ -3023,6 +3203,16 @@ "output":{"shape":"ListOptimizationJobsResponse"}, "documentation":"

Lists the optimization jobs in your account and their properties.

" }, + "ListPartnerApps":{ + "name":"ListPartnerApps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPartnerAppsRequest"}, + "output":{"shape":"ListPartnerAppsResponse"}, + "documentation":"

Lists all of the SageMaker Partner AI Apps in an account.

" + }, "ListPipelineExecutionSteps":{ "name":"ListPipelineExecutionSteps", "http":{ @@ -3178,6 +3368,16 @@ ], "documentation":"

Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched.

" }, + "ListTrainingPlans":{ + "name":"ListTrainingPlans", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTrainingPlansRequest"}, + "output":{"shape":"ListTrainingPlansResponse"}, + "documentation":"

Retrieves a list of training plans for the current account.
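Because this release also registers paginators for the new list APIs (ListTrainingPlans, ListClusterSchedulerConfigs, ListComputeQuotas, ListPartnerApps), the usual boto3 pagination pattern applies; a minimal sketch (the summary field names beyond the paginator's result key are assumptions):

```python
import boto3

sm = boto3.client("sagemaker")

# Iterate over every training plan in the account, one page at a time.
paginator = sm.get_paginator("list_training_plans")
for page in paginator.paginate():
    for summary in page["TrainingPlanSummaries"]:
        print(summary["TrainingPlanName"], summary["Status"])
```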

" + }, "ListTransformJobs":{ "name":"ListTransformJobs", "http":{ @@ -3320,6 +3520,19 @@ "output":{"shape":"SearchResponse"}, "documentation":"

Finds SageMaker resources that match a search query. Matching resources are returned as a list of SearchRecord objects in the response. You can sort the search results by any resource property in ascending or descending order.

You can query against the following value types: numeric, text, Boolean, and timestamp.

The Search API may provide access to otherwise restricted data. See Amazon SageMaker API Permissions: Actions, Permissions, and Resources Reference for more information.

" }, + "SearchTrainingPlanOfferings":{ + "name":"SearchTrainingPlanOfferings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SearchTrainingPlanOfferingsRequest"}, + "output":{"shape":"SearchTrainingPlanOfferingsResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Searches for available training plan offerings based on specified criteria.

For more information about how to reserve GPU capacity for your SageMaker training jobs or SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" + }, "SendPipelineExecutionStepFailure":{ "name":"SendPipelineExecutionStepFailure", "http":{ @@ -3671,6 +3884,21 @@ ], "documentation":"

Updates a SageMaker HyperPod cluster.

" }, + "UpdateClusterSchedulerConfig":{ + "name":"UpdateClusterSchedulerConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateClusterSchedulerConfigRequest"}, + "output":{"shape":"UpdateClusterSchedulerConfigResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ResourceLimitExceeded"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates the cluster policy configuration.

" + }, "UpdateClusterSoftware":{ "name":"UpdateClusterSoftware", "http":{ @@ -3698,6 +3926,21 @@ ], "documentation":"

Updates the specified Git repository with the specified values.

" }, + "UpdateComputeQuota":{ + "name":"UpdateComputeQuota", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateComputeQuotaRequest"}, + "output":{"shape":"UpdateComputeQuotaResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ResourceLimitExceeded"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates the compute allocation definition.

" + }, "UpdateContext":{ "name":"UpdateContext", "http":{ @@ -3992,6 +4235,20 @@ ], "documentation":"

Updates a notebook instance lifecycle configuration created with the CreateNotebookInstanceLifecycleConfig API.

" }, + "UpdatePartnerApp":{ + "name":"UpdatePartnerApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePartnerAppRequest"}, + "output":{"shape":"UpdatePartnerAppResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates a SageMaker Partner AI App.

" + }, "UpdatePipeline":{ "name":"UpdatePipeline", "http":{ @@ -4218,6 +4475,13 @@ }, "documentation":"

Lists the properties of an action. An action represents an action or activity. Some examples are a workflow step and a model deployment. Generally, an action involves at least one input artifact or output artifact.

" }, + "ActivationState":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "AddAssociationRequest":{ "type":"structure", "required":[ @@ -6165,6 +6429,16 @@ "type":"string", "enum":["Enabled"] }, + "AvailabilityZone":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[a-z]+\\-[0-9a-z\\-]+" + }, + "AvailableInstanceCount":{ + "type":"integer", + "min":0 + }, "AwsManagedHumanLoopRequestSource":{ "type":"string", "enum":[ @@ -6485,6 +6759,11 @@ "Or" ] }, + "BorrowLimit":{ + "type":"integer", + "max":500, + "min":1 + }, "Branch":{ "type":"string", "max":1024, @@ -7359,6 +7638,18 @@ "shape":"OnStartDeepHealthChecks", "documentation":"

A flag indicating whether deep health checks should be performed when the cluster instance group is created or updated.

" }, + "Status":{ + "shape":"InstanceGroupStatus", + "documentation":"

The current status of the cluster instance group.

" + }, + "TrainingPlanArn":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the training plan associated with this cluster instance group.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" + }, + "TrainingPlanStatus":{ + "shape":"InstanceGroupTrainingPlanStatus", + "documentation":"

The current status of the training plan associated with this cluster instance group.

" + }, "OverrideVpcConfig":{"shape":"VpcConfig"} }, "documentation":"

Details of an instance group in a SageMaker HyperPod cluster.

" @@ -7415,6 +7706,10 @@ "shape":"OnStartDeepHealthChecks", "documentation":"

A flag indicating whether deep health checks should be performed when the cluster instance group is created or updated.

" }, + "TrainingPlanArn":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the training plan to use for this cluster instance group.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" + }, "OverrideVpcConfig":{"shape":"VpcConfig"} }, "documentation":"

The specifications of an instance group that you need to define.

" @@ -7540,7 +7835,9 @@ "ml.g6e.12xlarge", "ml.g6e.24xlarge", "ml.g6e.48xlarge", - "ml.p5e.48xlarge" + "ml.p5e.48xlarge", + "ml.p5en.48xlarge", + "ml.trn2.48xlarge" ] }, "ClusterLifeCycleConfig":{ @@ -7719,6 +8016,71 @@ "type":"string", "pattern":"^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$" }, + "ClusterSchedulerConfigArn":{ + "type":"string", + "max":256, + "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:cluster-scheduler-config/[a-z0-9]{12}$" + }, + "ClusterSchedulerConfigId":{ + "type":"string", + "max":12, + "pattern":"^[a-z0-9]{12}$" + }, + "ClusterSchedulerConfigSummary":{ + "type":"structure", + "required":[ + "ClusterSchedulerConfigArn", + "ClusterSchedulerConfigId", + "Name", + "CreationTime", + "Status" + ], + "members":{ + "ClusterSchedulerConfigArn":{ + "shape":"ClusterSchedulerConfigArn", + "documentation":"

ARN of the cluster policy.

" + }, + "ClusterSchedulerConfigId":{ + "shape":"ClusterSchedulerConfigId", + "documentation":"

ID of the cluster policy.

" + }, + "ClusterSchedulerConfigVersion":{ + "shape":"Integer", + "documentation":"

Version of the cluster policy.

" + }, + "Name":{ + "shape":"EntityName", + "documentation":"

Name of the cluster policy.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

Creation time of the cluster policy.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

Last modified time of the cluster policy.

" + }, + "Status":{ + "shape":"SchedulerResourceStatus", + "documentation":"

Status of the cluster policy.

" + }, + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

ARN of the cluster.

" + } + }, + "documentation":"

Summary of the cluster policy.

" + }, + "ClusterSchedulerConfigSummaryList":{ + "type":"list", + "member":{"shape":"ClusterSchedulerConfigSummary"}, + "max":100, + "min":0 + }, + "ClusterSchedulerPriorityClassName":{ + "type":"string", + "pattern":"^[a-z0-9]([-a-z0-9]*[a-z0-9]){0,39}?$" + }, "ClusterSortBy":{ "type":"string", "enum":[ @@ -7766,6 +8128,10 @@ "ClusterStatus":{ "shape":"ClusterStatus", "documentation":"

The status of the SageMaker HyperPod cluster.

" + }, + "TrainingPlanArns":{ + "shape":"TrainingPlanArns", + "documentation":"

A list of Amazon Resource Names (ARNs) of the training plans associated with this cluster.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" } }, "documentation":"

Lists a summary of the properties of a SageMaker HyperPod cluster.

" @@ -8100,6 +8466,140 @@ "type":"list", "member":{"shape":"CompressionType"} }, + "ComputeQuotaArn":{ + "type":"string", + "max":2048, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:compute-quota/[a-z0-9]{12}$" + }, + "ComputeQuotaConfig":{ + "type":"structure", + "members":{ + "ComputeQuotaResources":{ + "shape":"ComputeQuotaResourceConfigList", + "documentation":"

Allocate compute resources by instance types.

" + }, + "ResourceSharingConfig":{ + "shape":"ResourceSharingConfig", + "documentation":"

Resource sharing configuration. This defines how an entity can lend and borrow idle compute with other entities within the cluster.

" + }, + "PreemptTeamTasks":{ + "shape":"PreemptTeamTasks", + "documentation":"

Allows workloads from within an entity to preempt same-team workloads. When set to LowerPriority, the entity's lower priority tasks are preempted by their own higher priority tasks.

Default is LowerPriority.

" + } + }, + "documentation":"

Configuration of the compute allocation definition for an entity. This includes the resource sharing option and the setting to preempt low priority tasks.

" + }, + "ComputeQuotaId":{ + "type":"string", + "pattern":"^[a-z0-9]{12}$" + }, + "ComputeQuotaResourceConfig":{ + "type":"structure", + "required":[ + "InstanceType", + "Count" + ], + "members":{ + "InstanceType":{ + "shape":"ClusterInstanceType", + "documentation":"

The instance type of the instance group for the cluster.

" + }, + "Count":{ + "shape":"InstanceCount", + "documentation":"

The number of instances to add to the instance group of a SageMaker HyperPod cluster.

" + } + }, + "documentation":"

Configuration of the resources used for the compute allocation definition.

" + }, + "ComputeQuotaResourceConfigList":{ + "type":"list", + "member":{"shape":"ComputeQuotaResourceConfig"}, + "max":15, + "min":0 + }, + "ComputeQuotaSummary":{ + "type":"structure", + "required":[ + "ComputeQuotaArn", + "ComputeQuotaId", + "Name", + "Status", + "ComputeQuotaTarget", + "CreationTime" + ], + "members":{ + "ComputeQuotaArn":{ + "shape":"ComputeQuotaArn", + "documentation":"

ARN of the compute allocation definition.

" + }, + "ComputeQuotaId":{ + "shape":"ComputeQuotaId", + "documentation":"

ID of the compute allocation definition.

" + }, + "Name":{ + "shape":"EntityName", + "documentation":"

Name of the compute allocation definition.

" + }, + "ComputeQuotaVersion":{ + "shape":"Integer", + "documentation":"

Version of the compute allocation definition.

" + }, + "Status":{ + "shape":"SchedulerResourceStatus", + "documentation":"

Status of the compute allocation definition.

" + }, + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

ARN of the cluster.

" + }, + "ComputeQuotaConfig":{ + "shape":"ComputeQuotaConfig", + "documentation":"

Configuration of the compute allocation definition. This includes the resource sharing option, and the setting to preempt low priority tasks.

" + }, + "ComputeQuotaTarget":{ + "shape":"ComputeQuotaTarget", + "documentation":"

The target entity to allocate compute resources to.

" + }, + "ActivationState":{ + "shape":"ActivationState", + "documentation":"

The state of the compute allocation being described. Use to enable or disable compute allocation.

Default is Enabled.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

Creation time of the compute allocation definition.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

Last modified time of the compute allocation definition.

" + } + }, + "documentation":"

Summary of the compute allocation definition.

" + }, + "ComputeQuotaSummaryList":{ + "type":"list", + "member":{"shape":"ComputeQuotaSummary"}, + "max":100, + "min":0 + }, + "ComputeQuotaTarget":{ + "type":"structure", + "required":["TeamName"], + "members":{ + "TeamName":{ + "shape":"ComputeQuotaTargetTeamName", + "documentation":"

Name of the team to allocate compute resources to.

" + }, + "FairShareWeight":{ + "shape":"FairShareWeight", + "documentation":"

Assigned entity fair-share weight. Idle compute will be shared across entities based on these assigned weights. This weight is only used when FairShare is enabled.

A weight of 0 is the lowest priority and 100 is the highest. Weight 0 is the default.

" + } + }, + "documentation":"

The target entity to allocate compute resources to.

" + }, + "ComputeQuotaTargetTeamName":{ + "type":"string", + "pattern":"^[a-z0-9]([-a-z0-9]*[a-z0-9]){0,39}?$" + }, "ConditionOutcome":{ "type":"string", "enum":[ @@ -8799,6 +9299,53 @@ } } }, + "CreateClusterSchedulerConfigRequest":{ + "type":"structure", + "required":[ + "Name", + "ClusterArn", + "SchedulerConfig" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

Name for the cluster policy.

" + }, + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

ARN of the cluster.

" + }, + "SchedulerConfig":{ + "shape":"SchedulerConfig", + "documentation":"

Cluster policy configuration. This policy is used for task prioritization and fair-share allocation of idle compute.

" + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

Description of the cluster policy.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags of the cluster policy.

" + } + } + }, + "CreateClusterSchedulerConfigResponse":{ + "type":"structure", + "required":[ + "ClusterSchedulerConfigArn", + "ClusterSchedulerConfigId" + ], + "members":{ + "ClusterSchedulerConfigArn":{ + "shape":"ClusterSchedulerConfigArn", + "documentation":"

ARN of the cluster policy.

" + }, + "ClusterSchedulerConfigId":{ + "shape":"ClusterSchedulerConfigId", + "documentation":"

ID of the cluster policy.

" + } + } + }, "CreateCodeRepositoryInput":{ "type":"structure", "required":[ @@ -8883,6 +9430,62 @@ } } }, + "CreateComputeQuotaRequest":{ + "type":"structure", + "required":[ + "Name", + "ClusterArn", + "ComputeQuotaConfig", + "ComputeQuotaTarget" + ], + "members":{ + "Name":{ + "shape":"EntityName", + "documentation":"

Name of the compute allocation definition.

" + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

Description of the compute allocation definition.

" + }, + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

ARN of the cluster.

" + }, + "ComputeQuotaConfig":{ + "shape":"ComputeQuotaConfig", + "documentation":"

Configuration of the compute allocation definition. This includes the resource sharing option, and the setting to preempt low priority tasks.

" + }, + "ComputeQuotaTarget":{ + "shape":"ComputeQuotaTarget", + "documentation":"

The target entity to allocate compute resources to.

" + }, + "ActivationState":{ + "shape":"ActivationState", + "documentation":"

The state of the compute allocation being described. Use to enable or disable compute allocation.

Default is Enabled.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags of the compute allocation definition.

" + } + } + }, + "CreateComputeQuotaResponse":{ + "type":"structure", + "required":[ + "ComputeQuotaArn", + "ComputeQuotaId" + ], + "members":{ + "ComputeQuotaArn":{ + "shape":"ComputeQuotaArn", + "documentation":"

ARN of the compute allocation definition.

" + }, + "ComputeQuotaId":{ + "shape":"ComputeQuotaId", + "documentation":"

ID of the compute allocation definition.

" + } + } + }, "CreateContextRequest":{ "type":"structure", "required":[ @@ -10585,6 +11188,95 @@ } } }, + "CreatePartnerAppPresignedUrlRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App to create the presigned URL for.

" + }, + "ExpiresInSeconds":{ + "shape":"ExpiresInSeconds", + "documentation":"

The time, in seconds, before the presigned URL expires.

" + }, + "SessionExpirationDurationInSeconds":{ + "shape":"SessionExpirationDurationInSeconds", + "documentation":"

Indicates how long the Amazon SageMaker Partner AI App session can be accessed after logging in.
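A minimal boto3 sketch of the two expiry settings above; the app ARN is a placeholder and the durations are illustrative:

```python
import boto3

sm = boto3.client("sagemaker")

url = sm.create_partner_app_presigned_url(
    Arn="arn:aws:sagemaker:us-west-2:111122223333:partner-app/app-abc123",  # placeholder
    ExpiresInSeconds=300,                      # the URL itself is short-lived
    SessionExpirationDurationInSeconds=43200,  # the session stays valid for 12 hours
)["Url"]
print(url)
```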

" + } + } + }, + "CreatePartnerAppPresignedUrlResponse":{ + "type":"structure", + "members":{ + "Url":{ + "shape":"String2048", + "documentation":"

The presigned URL that you can use to access the SageMaker Partner AI App.

" + } + } + }, + "CreatePartnerAppRequest":{ + "type":"structure", + "required":[ + "Name", + "Type", + "ExecutionRoleArn", + "Tier", + "AuthType" + ], + "members":{ + "Name":{ + "shape":"PartnerAppName", + "documentation":"

The name to give the SageMaker Partner AI App.

" + }, + "Type":{ + "shape":"PartnerAppType", + "documentation":"

The type of SageMaker Partner AI App to create. Must be one of the following: lakera-guard, comet, deepchecks-llm-evaluation, or fiddler.

" + }, + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the IAM role that the partner application uses.

" + }, + "MaintenanceConfig":{ + "shape":"PartnerAppMaintenanceConfig", + "documentation":"

Maintenance configuration settings for the SageMaker Partner AI App.

" + }, + "Tier":{ + "shape":"NonEmptyString64", + "documentation":"

Indicates the instance type and size of the cluster attached to the SageMaker Partner AI App.

" + }, + "ApplicationConfig":{ + "shape":"PartnerAppConfig", + "documentation":"

Configuration settings for the SageMaker Partner AI App.

" + }, + "AuthType":{ + "shape":"PartnerAppAuthType", + "documentation":"

The authorization type that users use to access the SageMaker Partner AI App.

" + }, + "EnableIamSessionBasedIdentity":{ + "shape":"Boolean", + "documentation":"

When set to TRUE, the SageMaker Partner AI App sets the Amazon Web Services IAM session name or the authenticated IAM user as the identity of the SageMaker Partner AI App user.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique token that guarantees that the call to this API is idempotent.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Each tag consists of a key and an optional value. Tag keys must be unique per resource.
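Pulling the request members above together, a hedged boto3 sketch; the name and role ARN are placeholders, and the Tier and AuthType values are assumptions (this diff documents the valid Type values but not those two):

```python
import boto3

sm = boto3.client("sagemaker")

response = sm.create_partner_app(
    Name="comet-tracking",                 # placeholder name
    Type="comet",                          # one of the documented partner app types
    ExecutionRoleArn="arn:aws:iam::111122223333:role/PartnerAppExecutionRole",  # placeholder
    Tier="SmallV1",                        # assumed tier value
    AuthType="IAM",                        # assumed auth type
    EnableIamSessionBasedIdentity=True,
    Tags=[{"Key": "team", "Value": "ml-platform"}],
)
print(response["Arn"])
```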

" + } + } + }, + "CreatePartnerAppResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App.

" + } + } + }, "CreatePipelineRequest":{ "type":"structure", "required":[ @@ -11029,6 +11721,37 @@ } } }, + "CreateTrainingPlanRequest":{ + "type":"structure", + "required":[ + "TrainingPlanName", + "TrainingPlanOfferingId" + ], + "members":{ + "TrainingPlanName":{ + "shape":"TrainingPlanName", + "documentation":"

The name of the training plan to create.

" + }, + "TrainingPlanOfferingId":{ + "shape":"TrainingPlanOfferingId", + "documentation":"

The unique identifier of the training plan offering to use for creating this plan.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of key-value pairs to apply to this training plan.

" + } + } + }, + "CreateTrainingPlanResponse":{ + "type":"structure", + "required":["TrainingPlanArn"], + "members":{ + "TrainingPlanArn":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the created training plan.

" + } + } + }, "CreateTransformJobRequest":{ "type":"structure", "required":[ @@ -11337,12 +12060,17 @@ "max":10, "min":1 }, + "CurrencyCode":{"type":"string"}, "CustomFileSystem":{ "type":"structure", "members":{ "EFSFileSystem":{ "shape":"EFSFileSystem", "documentation":"

A custom file system in Amazon EFS.

" + }, + "FSxLustreFileSystem":{ + "shape":"FSxLustreFileSystem", + "documentation":"

A custom file system in Amazon FSx for Lustre.

" } }, "documentation":"

A file system, created by you, that you assign to a user profile or space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.

", @@ -11354,6 +12082,10 @@ "EFSFileSystemConfig":{ "shape":"EFSFileSystemConfig", "documentation":"

The settings for a custom Amazon EFS file system.

" + }, + "FSxLustreFileSystemConfig":{ + "shape":"FSxLustreFileSystemConfig", + "documentation":"

The settings for a custom Amazon FSx for Lustre file system.

" } }, "documentation":"

The settings for assigning a custom file system to a user profile or space for an Amazon SageMaker Domain. Permitted users can access this file system in Amazon SageMaker Studio.

", @@ -12007,6 +12739,16 @@ } } }, + "DeleteClusterSchedulerConfigRequest":{ + "type":"structure", + "required":["ClusterSchedulerConfigId"], + "members":{ + "ClusterSchedulerConfigId":{ + "shape":"ClusterSchedulerConfigId", + "documentation":"

ID of the cluster policy.

" + } + } + }, "DeleteCodeRepositoryInput":{ "type":"structure", "required":["CodeRepositoryName"], @@ -12027,6 +12769,16 @@ } } }, + "DeleteComputeQuotaRequest":{ + "type":"structure", + "required":["ComputeQuotaId"], + "members":{ + "ComputeQuotaId":{ + "shape":"ComputeQuotaId", + "documentation":"

ID of the compute allocation definition.

" + } + } + }, "DeleteContextRequest":{ "type":"structure", "required":["ContextName"], @@ -12462,6 +13214,30 @@ } } }, + "DeletePartnerAppRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App to delete.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique token that guarantees that the call to this API is idempotent.

", + "idempotencyToken":true + } + } + }, + "DeletePartnerAppResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App that was deleted.

" + } + } + }, "DeletePipelineRequest":{ "type":"structure", "required":[ @@ -13402,6 +14178,79 @@ } } }, + "DescribeClusterSchedulerConfigRequest":{ + "type":"structure", + "required":["ClusterSchedulerConfigId"], + "members":{ + "ClusterSchedulerConfigId":{ + "shape":"ClusterSchedulerConfigId", + "documentation":"

ID of the cluster policy.

" + }, + "ClusterSchedulerConfigVersion":{ + "shape":"Integer", + "documentation":"

Version of the cluster policy.

" + } + } + }, + "DescribeClusterSchedulerConfigResponse":{ + "type":"structure", + "required":[ + "ClusterSchedulerConfigArn", + "ClusterSchedulerConfigId", + "Name", + "ClusterSchedulerConfigVersion", + "Status", + "CreationTime" + ], + "members":{ + "ClusterSchedulerConfigArn":{ + "shape":"ClusterSchedulerConfigArn", + "documentation":"

ARN of the cluster policy.

" + }, + "ClusterSchedulerConfigId":{ + "shape":"ClusterSchedulerConfigId", + "documentation":"

ID of the cluster policy.

" + }, + "Name":{ + "shape":"EntityName", + "documentation":"

Name of the cluster policy.

" + }, + "ClusterSchedulerConfigVersion":{ + "shape":"Integer", + "documentation":"

Version of the cluster policy.

" + }, + "Status":{ + "shape":"SchedulerResourceStatus", + "documentation":"

Status of the cluster policy.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

Failure reason of the cluster policy.

" + }, + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

ARN of the cluster where the cluster policy is applied.

" + }, + "SchedulerConfig":{ + "shape":"SchedulerConfig", + "documentation":"

Cluster policy configuration. This policy is used for task prioritization and fair-share allocation. This helps prioritize critical workloads and distributes idle compute across entities.

" + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

Description of the cluster policy.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

Creation time of the cluster policy.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

Last modified time of the cluster policy.

" + }, + "LastModifiedBy":{"shape":"UserContext"} + } + }, "DescribeCodeRepositoryInput":{ "type":"structure", "required":["CodeRepositoryName"], @@ -13543,6 +14392,88 @@ } } }, + "DescribeComputeQuotaRequest":{ + "type":"structure", + "required":["ComputeQuotaId"], + "members":{ + "ComputeQuotaId":{ + "shape":"ComputeQuotaId", + "documentation":"

ID of the compute allocation definition.

" + }, + "ComputeQuotaVersion":{ + "shape":"Integer", + "documentation":"

Version of the compute allocation definition.

" + } + } + }, + "DescribeComputeQuotaResponse":{ + "type":"structure", + "required":[ + "ComputeQuotaArn", + "ComputeQuotaId", + "Name", + "ComputeQuotaVersion", + "Status", + "ComputeQuotaTarget", + "CreationTime" + ], + "members":{ + "ComputeQuotaArn":{ + "shape":"ComputeQuotaArn", + "documentation":"

ARN of the compute allocation definition.

" + }, + "ComputeQuotaId":{ + "shape":"ComputeQuotaId", + "documentation":"

ID of the compute allocation definition.

" + }, + "Name":{ + "shape":"EntityName", + "documentation":"

Name of the compute allocation definition.

" + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

Description of the compute allocation definition.

" + }, + "ComputeQuotaVersion":{ + "shape":"Integer", + "documentation":"

Version of the compute allocation definition.

" + }, + "Status":{ + "shape":"SchedulerResourceStatus", + "documentation":"

Status of the compute allocation definition.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

Failure reason of the compute allocation definition.

" + }, + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

ARN of the cluster.

" + }, + "ComputeQuotaConfig":{ + "shape":"ComputeQuotaConfig", + "documentation":"

Configuration of the compute allocation definition. This includes the resource sharing option, and the setting to preempt low priority tasks.

" + }, + "ComputeQuotaTarget":{ + "shape":"ComputeQuotaTarget", + "documentation":"

The target entity to allocate compute resources to.

" + }, + "ActivationState":{ + "shape":"ActivationState", + "documentation":"

The state of the compute allocation being described. Use to enable or disable compute allocation.

Default is Enabled.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

Creation time of the compute allocation configuration.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

Last modified time of the compute allocation configuration.

" + }, + "LastModifiedBy":{"shape":"UserContext"} + } + }, "DescribeContextRequest":{ "type":"structure", "required":["ContextName"], @@ -16199,6 +17130,77 @@ } } }, + "DescribePartnerAppRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App to describe.

" + } + } + }, + "DescribePartnerAppResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App that was described.

" + }, + "Name":{ + "shape":"PartnerAppName", + "documentation":"

The name of the SageMaker Partner AI App.

" + }, + "Type":{ + "shape":"PartnerAppType", + "documentation":"

The type of SageMaker Partner AI App. Must be one of the following: lakera-guard, comet, deepchecks-llm-evaluation, or fiddler.

" + }, + "Status":{ + "shape":"PartnerAppStatus", + "documentation":"

The status of the SageMaker Partner AI App.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time that the SageMaker Partner AI App was created.

" + }, + "ExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the IAM role associated with the SageMaker Partner AI App.

" + }, + "BaseUrl":{ + "shape":"String2048", + "documentation":"

The URL of the SageMaker Partner AI App that the Application SDK uses to support in-app calls for the user.

" + }, + "MaintenanceConfig":{ + "shape":"PartnerAppMaintenanceConfig", + "documentation":"

Maintenance configuration settings for the SageMaker Partner AI App.

" + }, + "Tier":{ + "shape":"NonEmptyString64", + "documentation":"

The instance type and size of the cluster attached to the SageMaker Partner AI App.

" + }, + "Version":{ + "shape":"NonEmptyString64", + "documentation":"

The version of the SageMaker Partner AI App.

" + }, + "ApplicationConfig":{ + "shape":"PartnerAppConfig", + "documentation":"

Configuration settings for the SageMaker Partner AI App.

" + }, + "AuthType":{ + "shape":"PartnerAppAuthType", + "documentation":"

The authorization type that users use to access the SageMaker Partner AI App.

" + }, + "EnableIamSessionBasedIdentity":{ + "shape":"Boolean", + "documentation":"

When set to TRUE, the SageMaker Partner AI App sets the Amazon Web Services IAM session name or the authenticated IAM user as the identity of the SageMaker Partner AI App user.

" + }, + "Error":{ + "shape":"ErrorInfo", + "documentation":"

An error field object that contains the error code and the reason for an operation failure.

" + } + } + }, "DescribePipelineDefinitionForExecutionRequest":{ "type":"structure", "required":["PipelineExecutionArn"], @@ -16825,6 +17827,86 @@ } } }, + "DescribeTrainingPlanRequest":{ + "type":"structure", + "required":["TrainingPlanName"], + "members":{ + "TrainingPlanName":{ + "shape":"TrainingPlanName", + "documentation":"

The name of the training plan to describe.

" + } + } + }, + "DescribeTrainingPlanResponse":{ + "type":"structure", + "required":[ + "TrainingPlanArn", + "TrainingPlanName", + "Status" + ], + "members":{ + "TrainingPlanArn":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the training plan.

" + }, + "TrainingPlanName":{ + "shape":"TrainingPlanName", + "documentation":"

The name of the training plan.

" + }, + "Status":{ + "shape":"TrainingPlanStatus", + "documentation":"

The current status of the training plan (e.g., Pending, Active, Expired). To see the complete list of status values available for a training plan, refer to the Status attribute within the TrainingPlanSummary object.

" + }, + "StatusMessage":{ + "shape":"TrainingPlanStatusMessage", + "documentation":"

A message providing additional information about the current status of the training plan.

" + }, + "DurationHours":{ + "shape":"TrainingPlanDurationHours", + "documentation":"

The number of whole hours in the total duration for this training plan.

" + }, + "DurationMinutes":{ + "shape":"TrainingPlanDurationMinutes", + "documentation":"

The additional minutes beyond whole hours in the total duration for this training plan.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the training plan.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the training plan.

" + }, + "UpfrontFee":{ + "shape":"String256", + "documentation":"

The upfront fee for the training plan.

" + }, + "CurrencyCode":{ + "shape":"CurrencyCode", + "documentation":"

The currency code for the upfront fee (e.g., USD).

" + }, + "TotalInstanceCount":{ + "shape":"TotalInstanceCount", + "documentation":"

The total number of instances reserved in this training plan.

" + }, + "AvailableInstanceCount":{ + "shape":"AvailableInstanceCount", + "documentation":"

The number of instances currently available for use in this training plan.

" + }, + "InUseInstanceCount":{ + "shape":"InUseInstanceCount", + "documentation":"

The number of instances currently in use from this training plan.

" + }, + "TargetResources":{ + "shape":"SageMakerResourceNames", + "documentation":"

The target resources (e.g., SageMaker Training Jobs, SageMaker HyperPod) that can use this training plan.

Training plans are specific to their target resource.

" + }, + "ReservedCapacitySummaries":{ + "shape":"ReservedCapacitySummaries", + "documentation":"

The list of Reserved Capacity providing the underlying compute resources of the plan.

" + } + } + }, "DescribeTransformJobRequest":{ "type":"structure", "required":["TransformJobName"], @@ -18755,6 +19837,20 @@ "max":1024, "pattern":"[\\S\\s]*" }, + "ErrorInfo":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"NonEmptyString64", + "documentation":"

The error code for an invalid or failed operation.

" + }, + "Reason":{ + "shape":"NonEmptyString256", + "documentation":"

The failure reason for the operation.

" + } + }, + "documentation":"

An error field object that contains the error code and the reason for an operation failure.

" + }, "ExcludeFeaturesAttribute":{ "type":"string", "max":100 @@ -18952,6 +20048,32 @@ }, "documentation":"

A parameter to activate explainers.

" }, + "FSxLustreFileSystem":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

Amazon FSx for Lustre file system ID.

" + } + }, + "documentation":"

A custom file system in Amazon FSx for Lustre.

" + }, + "FSxLustreFileSystemConfig":{ + "type":"structure", + "required":["FileSystemId"], + "members":{ + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The globally unique, 17-digit ID of the file system, assigned by Amazon FSx for Lustre.

" + }, + "FileSystemPath":{ + "shape":"FileSystemPath", + "documentation":"

The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below.

" + } + }, + "documentation":"

The settings for assigning a custom Amazon FSx for Lustre file system to a user profile or space for an Amazon SageMaker Domain.

" + }, "FailStepMetadata":{ "type":"structure", "members":{ @@ -18973,6 +20095,18 @@ "type":"string", "max":1024 }, + "FairShare":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "FairShareWeight":{ + "type":"integer", + "max":100, + "min":0 + }, "FeatureAdditions":{ "type":"list", "member":{"shape":"FeatureDefinition"}, @@ -21471,6 +22605,10 @@ } } }, + "InUseInstanceCount":{ + "type":"integer", + "min":0 + }, "InferenceComponentArn":{ "type":"string", "max":2048, @@ -22131,6 +23269,10 @@ "member":{"shape":"TrainingInputMode"}, "min":1 }, + "InstanceCount":{ + "type":"integer", + "min":1 + }, "InstanceGroup":{ "type":"structure", "required":[ @@ -22165,6 +23307,23 @@ "member":{"shape":"InstanceGroupName"}, "max":5 }, + "InstanceGroupStatus":{ + "type":"string", + "enum":[ + "InService", + "Creating", + "Updating", + "Failed", + "Degraded", + "SystemUpdating", + "Deleting" + ] + }, + "InstanceGroupTrainingPlanStatus":{ + "type":"string", + "max":63, + "min":1 + }, "InstanceGroups":{ "type":"list", "member":{"shape":"InstanceGroup"}, @@ -23583,6 +24742,60 @@ } } }, + "ListClusterSchedulerConfigsRequest":{ + "type":"structure", + "members":{ + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

Filter for cluster policies created after this time. The input for this parameter is a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

Filter for cluster policies created before this time. The input for this parameter is a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter.

" + }, + "NameContains":{ + "shape":"EntityName", + "documentation":"

Filter for name containing this string.

" + }, + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

Filter for ARN of the cluster.

" + }, + "Status":{ + "shape":"SchedulerResourceStatus", + "documentation":"

Filter for status.

" + }, + "SortBy":{ + "shape":"SortClusterSchedulerConfigBy", + "documentation":"

Filter for sorting the list by a given value. For example, sort by name, creation time, or status.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The order of the list. By default, the list is sorted in Descending order according to SortBy. To change the list order, specify SortOrder as Ascending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of cluster policies to list.

" + } + } + }, + "ListClusterSchedulerConfigsResponse":{ + "type":"structure", + "members":{ + "ClusterSchedulerConfigSummaries":{ + "shape":"ClusterSchedulerConfigSummaryList", + "documentation":"

Summaries of the cluster policies.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.
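
As an unofficial, minimal sketch of how the cluster policy shapes above might be exercised from Python (assuming the ListClusterSchedulerConfigs operation surfaces on the boto3 sagemaker client under botocore's usual snake_case naming, and treating the ARN and filter values as placeholders):

import boto3

# Unofficial sketch: list cluster policies (scheduler configs) for a HyperPod cluster.
# The ARN and filter values below are placeholders, not real resources.
sm = boto3.client("sagemaker")

resp = sm.list_cluster_scheduler_configs(
    ClusterArn="arn:aws:sagemaker:us-west-2:111122223333:cluster/example",
    Status="Created",          # a SchedulerResourceStatus value
    SortBy="CreationTime",
    SortOrder="Descending",
    MaxResults=10,
)
for summary in resp.get("ClusterSchedulerConfigSummaries", []):
    print(summary)

# A NextToken in the response means the listing was truncated; pass it back
# (together with the same filters) to fetch the next page.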

" + } + } + }, "ListClustersRequest":{ "type":"structure", "members":{ @@ -23613,6 +24826,10 @@ "SortOrder":{ "shape":"SortOrder", "documentation":"

The sort order for results. The default value is Ascending.

" + }, + "TrainingPlanArn":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the training plan to filter clusters by. For more information about reserving GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" } } }, @@ -23756,6 +24973,60 @@ "Status" ] }, + "ListComputeQuotasRequest":{ + "type":"structure", + "members":{ + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

Filter for compute allocation definitions created after this time. The input for this parameter is a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

Filter for compute allocation definitions created before this time. The input for this parameter is a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter.

" + }, + "NameContains":{ + "shape":"EntityName", + "documentation":"

Filter for name containing this string.

" + }, + "Status":{ + "shape":"SchedulerResourceStatus", + "documentation":"

Filter for status.

" + }, + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

Filter for ARN of the cluster.

" + }, + "SortBy":{ + "shape":"SortQuotaBy", + "documentation":"

Filter for sorting the list by a given value. For example, sort by name, creation time, or status.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

The order of the list. By default, the list is sorted in Descending order according to SortBy. To change the list order, specify SortOrder as Ascending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of compute allocation definitions to list.

" + } + } + }, + "ListComputeQuotasResponse":{ + "type":"structure", + "members":{ + "ComputeQuotaSummaries":{ + "shape":"ComputeQuotaSummaryList", + "documentation":"

Summaries of the compute allocation definitions.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.
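
Similarly, a hedged sketch of listing and describing compute allocation definitions, assuming list_compute_quotas and describe_compute_quota method names and using placeholder identifiers:

import boto3

# Unofficial sketch: enumerate compute allocation definitions (compute quotas)
# for a cluster, then describe one at a specific version. IDs/ARNs are placeholders.
sm = boto3.client("sagemaker")

quotas = sm.list_compute_quotas(
    ClusterArn="arn:aws:sagemaker:us-west-2:111122223333:cluster/example",
    SortBy="CreationTime",
    SortOrder="Descending",
)
for summary in quotas.get("ComputeQuotaSummaries", []):
    print(summary)

detail = sm.describe_compute_quota(
    ComputeQuotaId="example-quota-id",   # placeholder; taken from a summary in practice
    ComputeQuotaVersion=1,
)
print(detail["Status"], detail.get("ComputeQuotaConfig"), detail.get("ActivationState"))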

" + } + } + }, "ListContextsRequest":{ "type":"structure", "members":{ @@ -26160,6 +27431,32 @@ "Status" ] }, + "ListPartnerAppsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + } + } + }, + "ListPartnerAppsResponse":{ + "type":"structure", + "members":{ + "Summaries":{ + "shape":"PartnerAppSummaries", + "documentation":"

The information related to each of the SageMaker Partner AI Apps in an account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.
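
A short, unofficial sketch of paging through Partner AI Apps and describing one of them, assuming list_partner_apps and describe_partner_app surface on the sagemaker client:

import boto3

# Unofficial sketch: list SageMaker Partner AI Apps and inspect the first one.
sm = boto3.client("sagemaker")

apps = sm.list_partner_apps(MaxResults=10)
for app in apps.get("Summaries", []):
    print(app.get("Name"), app.get("Type"), app.get("Status"))

if apps.get("Summaries"):
    detail = sm.describe_partner_app(Arn=apps["Summaries"][0]["Arn"])
    # BaseUrl is what the Application SDK uses for in-app calls.
    print(detail.get("BaseUrl"), detail.get("MaintenanceConfig"))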

" + } + } + }, "ListPipelineExecutionStepsRequest":{ "type":"structure", "members":{ @@ -26773,6 +28070,10 @@ "WarmPoolStatusEquals":{ "shape":"WarmPoolResourceStatus", "documentation":"

A filter that retrieves only training jobs with a specific warm pool status.

" + }, + "TrainingPlanArnEquals":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the training plan to filter training jobs by. For more information about reserving GPU capacity for your SageMaker training jobs using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" } } }, @@ -26790,6 +28091,54 @@ } } }, + "ListTrainingPlansRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to continue pagination if more results are available.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in the response.

", + "box":true + }, + "StartTimeAfter":{ + "shape":"Timestamp", + "documentation":"

Filter to list only training plans with an actual start time after this date.

" + }, + "StartTimeBefore":{ + "shape":"Timestamp", + "documentation":"

Filter to list only training plans with an actual start time before this date.

" + }, + "SortBy":{ + "shape":"TrainingPlanSortBy", + "documentation":"

The training plan field to sort the results by (e.g., StartTime, Status).

" + }, + "SortOrder":{ + "shape":"TrainingPlanSortOrder", + "documentation":"

The order to sort the results (Ascending or Descending).

" + }, + "Filters":{ + "shape":"TrainingPlanFilters", + "documentation":"

Additional filters to apply to the list of training plans.

" + } + } + }, + "ListTrainingPlansResponse":{ + "type":"structure", + "required":["TrainingPlanSummaries"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to continue pagination if more results are available.

" + }, + "TrainingPlanSummaries":{ + "shape":"TrainingPlanSummaries", + "documentation":"

A list of summary information for the training plans.
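
For the training plan shapes, a hedged sketch that searches reservable capacity and then lists active plans; method names assume the standard snake_case mapping, and the instance type, count, and duration are examples only:

import boto3

# Unofficial sketch: search training plan offerings, then list active plans.
sm = boto3.client("sagemaker")

offerings = sm.search_training_plan_offerings(
    InstanceType="ml.p5.48xlarge",         # a ReservedCapacityInstanceType value
    InstanceCount=4,
    TargetResources=["hyperpod-cluster"],  # or "training-job"
    DurationHours=720,                     # example desired duration
)
for offer in offerings.get("TrainingPlanOfferings", []):
    print(offer["TrainingPlanOfferingId"], offer.get("UpfrontFee"), offer.get("CurrencyCode"))

plans = sm.list_training_plans(
    SortBy="StartTime",
    SortOrder="Descending",
    Filters=[{"Name": "Status", "Value": "Active"}],
)
for plan in plans.get("TrainingPlanSummaries", []):
    print(plan["TrainingPlanName"], plan["Status"], plan.get("TotalInstanceCount"))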

" + } + } + }, "ListTransformJobsRequest":{ "type":"structure", "members":{ @@ -27415,7 +28764,12 @@ "Endpoints", "Projects", "InferenceOptimization", - "PerformanceEvaluation" + "PerformanceEvaluation", + "HyperPodClusters", + "LakeraGuard", + "Comet", + "DeepchecksLLMEvaluation", + "Fiddler" ] }, "MlflowVersion":{ @@ -30936,6 +32290,110 @@ "type":"list", "member":{"shape":"Parent"} }, + "PartnerAppAdminUserList":{ + "type":"list", + "member":{"shape":"NonEmptyString256"}, + "max":5, + "min":0 + }, + "PartnerAppArguments":{ + "type":"map", + "key":{"shape":"NonEmptyString256"}, + "value":{"shape":"String1024"}, + "max":5, + "min":0 + }, + "PartnerAppArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:partner-app\\/app-[A-Z0-9]{12}$" + }, + "PartnerAppAuthType":{ + "type":"string", + "enum":["IAM"] + }, + "PartnerAppConfig":{ + "type":"structure", + "members":{ + "AdminUsers":{ + "shape":"PartnerAppAdminUserList", + "documentation":"

The list of users that are given admin access to the SageMaker Partner AI App.

" + }, + "Arguments":{ + "shape":"PartnerAppArguments", + "documentation":"

This is a map of required inputs for a SageMaker Partner AI App. Based on the application type, the map is populated with a key and value pair that is specific to the user and application.

" + } + }, + "documentation":"

Configuration settings for the SageMaker Partner AI App.

" + }, + "PartnerAppMaintenanceConfig":{ + "type":"structure", + "members":{ + "MaintenanceWindowStart":{ + "shape":"WeeklyScheduleTimeFormat", + "documentation":"

The day and time of the week, in Coordinated Universal Time (UTC) 24-hour standard time, that weekly maintenance updates are scheduled. This value must take the following format: 3-letter-day:24-h-hour:minute. For example: Tue:03:30.

" + } + }, + "documentation":"

Maintenance configuration settings for the SageMaker Partner AI App.

" + }, + "PartnerAppName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9]+" + }, + "PartnerAppStatus":{ + "type":"string", + "enum":[ + "Creating", + "Updating", + "Deleting", + "Available", + "Failed", + "UpdateFailed", + "Deleted" + ] + }, + "PartnerAppSummaries":{ + "type":"list", + "member":{"shape":"PartnerAppSummary"} + }, + "PartnerAppSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App.

" + }, + "Name":{ + "shape":"PartnerAppName", + "documentation":"

The name of the SageMaker Partner AI App.

" + }, + "Type":{ + "shape":"PartnerAppType", + "documentation":"

The type of SageMaker Partner AI App to create. Must be one of the following: lakera-guard, comet, deepchecks-llm-evaluation, or fiddler.

" + }, + "Status":{ + "shape":"PartnerAppStatus", + "documentation":"

The status of the SageMaker Partner AI App.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of the SageMaker Partner AI App.

" + } + }, + "documentation":"

A subset of information related to a SageMaker Partner AI App. This information is used as part of the ListPartnerApps API response.

" + }, + "PartnerAppType":{ + "type":"string", + "enum":[ + "lakera-guard", + "comet", + "deepchecks-llm-evaluation", + "fiddler" + ] + }, "PendingDeploymentSummary":{ "type":"structure", "required":["EndpointConfigName"], @@ -31492,7 +32950,43 @@ }, "documentation":"

A specification for a predefined metric.

" }, + "PreemptTeamTasks":{ + "type":"string", + "enum":[ + "Never", + "LowerPriority" + ] + }, "PresignedDomainUrl":{"type":"string"}, + "PriorityClass":{ + "type":"structure", + "required":[ + "Name", + "Weight" + ], + "members":{ + "Name":{ + "shape":"ClusterSchedulerPriorityClassName", + "documentation":"

Name of the priority class.

" + }, + "Weight":{ + "shape":"PriorityWeight", + "documentation":"

Weight of the priority class. The value must be within a range from 0 to 100, where 0 is the default.

A weight of 0 is the lowest priority and 100 is the highest.

" + } + }, + "documentation":"

Priority class configuration. When included in PriorityClasses, these class configurations define how tasks are queued.

" + }, + "PriorityClassList":{ + "type":"list", + "member":{"shape":"PriorityClass"}, + "max":10, + "min":0 + }, + "PriorityWeight":{ + "type":"integer", + "max":100, + "min":0 + }, "ProbabilityThresholdAttribute":{"type":"double"}, "ProblemType":{ "type":"string", @@ -33762,6 +35256,145 @@ "max":1024, "pattern":"^https://([.\\-_a-zA-Z0-9]+/?){3,1016}$" }, + "ReservedCapacityArn":{ + "type":"string", + "max":2048, + "min":50, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:reserved-capacity/.*" + }, + "ReservedCapacityDurationHours":{ + "type":"long", + "max":87600, + "min":0 + }, + "ReservedCapacityDurationMinutes":{ + "type":"long", + "max":59, + "min":0 + }, + "ReservedCapacityInstanceCount":{ + "type":"integer", + "max":256, + "min":1 + }, + "ReservedCapacityInstanceType":{ + "type":"string", + "enum":[ + "ml.p4d.24xlarge", + "ml.p5.48xlarge", + "ml.p5e.48xlarge", + "ml.p5en.48xlarge", + "ml.trn2.48xlarge" + ] + }, + "ReservedCapacityOffering":{ + "type":"structure", + "required":[ + "InstanceType", + "InstanceCount" + ], + "members":{ + "InstanceType":{ + "shape":"ReservedCapacityInstanceType", + "documentation":"

The instance type for the reserved capacity offering.

" + }, + "InstanceCount":{ + "shape":"ReservedCapacityInstanceCount", + "documentation":"

The number of instances in the reserved capacity offering.

" + }, + "AvailabilityZone":{ + "shape":"AvailabilityZone", + "documentation":"

The availability zone for the reserved capacity offering.

" + }, + "DurationHours":{ + "shape":"ReservedCapacityDurationHours", + "documentation":"

The number of whole hours in the total duration for this reserved capacity offering.

" + }, + "DurationMinutes":{ + "shape":"ReservedCapacityDurationMinutes", + "documentation":"

The additional minutes beyond whole hours in the total duration for this reserved capacity offering.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the reserved capacity offering.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the reserved capacity offering.

" + } + }, + "documentation":"

Details about a reserved capacity offering for a training plan offering.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" + }, + "ReservedCapacityOfferings":{ + "type":"list", + "member":{"shape":"ReservedCapacityOffering"}, + "max":5, + "min":0 + }, + "ReservedCapacityStatus":{ + "type":"string", + "enum":[ + "Pending", + "Active", + "Scheduled", + "Expired", + "Failed" + ] + }, + "ReservedCapacitySummaries":{ + "type":"list", + "member":{"shape":"ReservedCapacitySummary"}, + "max":5, + "min":0 + }, + "ReservedCapacitySummary":{ + "type":"structure", + "required":[ + "ReservedCapacityArn", + "InstanceType", + "TotalInstanceCount", + "Status" + ], + "members":{ + "ReservedCapacityArn":{ + "shape":"ReservedCapacityArn", + "documentation":"

The Amazon Resource Name (ARN) of the reserved capacity.

" + }, + "InstanceType":{ + "shape":"ReservedCapacityInstanceType", + "documentation":"

The instance type for the reserved capacity.

" + }, + "TotalInstanceCount":{ + "shape":"TotalInstanceCount", + "documentation":"

The total number of instances in the reserved capacity.

" + }, + "Status":{ + "shape":"ReservedCapacityStatus", + "documentation":"

The current status of the reserved capacity.

" + }, + "AvailabilityZone":{ + "shape":"AvailabilityZone", + "documentation":"

The availability zone for the reserved capacity.

" + }, + "DurationHours":{ + "shape":"ReservedCapacityDurationHours", + "documentation":"

The number of whole hours in the total duration for this reserved capacity.

" + }, + "DurationMinutes":{ + "shape":"ReservedCapacityDurationMinutes", + "documentation":"

The additional minutes beyond whole hours in the total duration for this reserved capacity.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the reserved capacity.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the reserved capacity.

" + } + }, + "documentation":"

Details of a reserved capacity for the training plan.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" + }, "ResolvedAttributes":{ "type":"structure", "members":{ @@ -33863,6 +35496,10 @@ "InstanceGroups":{ "shape":"InstanceGroups", "documentation":"

The configuration of a heterogeneous cluster in JSON format.

" + }, + "TrainingPlanArn":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the training plan to use for this resource configuration.

" } }, "documentation":"

Describes the resources, including machine learning (ML) compute instances and ML storage volumes, to use for model training.
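
To show where the new TrainingPlanArn field sits, here is a small illustrative sketch of a ResourceConfig value as it would be passed to a training job request; the ARN, instance type, and sizes are placeholders:

# Unofficial sketch: a ResourceConfig that pins a training job to a training plan.
# All values, including the ARN, are placeholders.
resource_config = {
    "InstanceType": "ml.p5.48xlarge",
    "InstanceCount": 4,
    "VolumeSizeInGB": 500,
    "TrainingPlanArn": "arn:aws:sagemaker:us-west-2:111122223333:training-plan/example-plan",
}
# This dict is supplied as the ResourceConfig argument of create_training_job,
# alongside the usual AlgorithmSpecification, RoleArn, OutputDataConfig, and
# StoppingCondition arguments.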

" @@ -33941,6 +35578,29 @@ "documentation":"Optional. Indicates how many seconds the resource stayed in ResourceRetained state. Populated only after resource reaches ResourceReused or ResourceReleased state.", "min":0 }, + "ResourceSharingConfig":{ + "type":"structure", + "required":["Strategy"], + "members":{ + "Strategy":{ + "shape":"ResourceSharingStrategy", + "documentation":"

The strategy for how idle compute is shared within the cluster. Valid strategies are Lend, DontLend, and LendAndBorrow.

Default is LendAndBorrow.

" + }, + "BorrowLimit":{ + "shape":"BorrowLimit", + "documentation":"

The limit on how much idle compute can be borrowed. The value can be 1 to 500 percent of the idle compute that the team is allowed to borrow.

Default is 50.

" + } + }, + "documentation":"

Resource sharing configuration.
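
A tiny illustrative sketch of a ResourceSharingConfig value, with assumed numbers, as it would appear inside a compute allocation definition:

# Unofficial sketch of a ResourceSharingConfig; values are illustrative only.
resource_sharing_config = {
    "Strategy": "LendAndBorrow",  # one of Lend, DontLend, LendAndBorrow
    "BorrowLimit": 50,            # percent of idle compute the team may borrow (1-500)
}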

" + }, + "ResourceSharingStrategy":{ + "type":"string", + "enum":[ + "Lend", + "DontLend", + "LendAndBorrow" + ] + }, "ResourceSpec":{ "type":"structure", "members":{ @@ -34277,6 +35937,18 @@ "max":255, "pattern":"^arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:aws:hub-content\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}\\/Model\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}$" }, + "SageMakerResourceName":{ + "type":"string", + "enum":[ + "training-job", + "hyperpod-cluster" + ] + }, + "SageMakerResourceNames":{ + "type":"list", + "member":{"shape":"SageMakerResourceName"}, + "min":1 + }, "SagemakerServicecatalogStatus":{ "type":"string", "enum":[ @@ -34371,6 +36043,37 @@ "Stopped" ] }, + "SchedulerConfig":{ + "type":"structure", + "members":{ + "PriorityClasses":{ + "shape":"PriorityClassList", + "documentation":"

List of the priority classes, PriorityClass, of the cluster policy. When specified, these class configurations define how tasks are queued.

" + }, + "FairShare":{ + "shape":"FairShare", + "documentation":"

When enabled, entities borrow idle compute based on their assigned FairShareWeight.

When disabled, entities borrow idle compute on a first-come, first-served basis.

Default is Enabled.

" + } + }, + "documentation":"

Cluster policy configuration. This policy is used for task prioritization and fair-share allocation. This helps prioritize critical workloads and distributes idle compute across entities.
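
And a matching illustrative sketch of a SchedulerConfig (cluster policy) value; the class names and weights are assumptions, not recommendations:

# Unofficial sketch of a SchedulerConfig with priority classes and fair-share enabled.
scheduler_config = {
    "PriorityClasses": [
        {"Name": "critical", "Weight": 90},
        {"Name": "background", "Weight": 0},  # weight 0 is the lowest priority and the default
    ],
    "FairShare": "Enabled",
}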

" + }, + "SchedulerResourceStatus":{ + "type":"string", + "enum":[ + "Creating", + "CreateFailed", + "CreateRollbackFailed", + "Created", + "Updating", + "UpdateFailed", + "UpdateRollbackFailed", + "Updated", + "Deleting", + "DeleteFailed", + "DeleteRollbackFailed", + "Deleted" + ] + }, "Scope":{ "type":"string", "max":1024, @@ -34512,6 +36215,50 @@ "Descending" ] }, + "SearchTrainingPlanOfferingsRequest":{ + "type":"structure", + "required":[ + "InstanceType", + "InstanceCount", + "TargetResources" + ], + "members":{ + "InstanceType":{ + "shape":"ReservedCapacityInstanceType", + "documentation":"

The type of instance you want to search for in the available training plan offerings. This field allows you to filter the search results based on the specific compute resources you require for your SageMaker training jobs or SageMaker HyperPod clusters. When searching for training plan offerings, specifying the instance type helps you find Reserved Instances that match your computational needs.

" + }, + "InstanceCount":{ + "shape":"ReservedCapacityInstanceCount", + "documentation":"

The number of instances you want to reserve in the training plan offerings. This allows you to specify the quantity of compute resources needed for your SageMaker training jobs or SageMaker HyperPod clusters, helping you find reserved capacity offerings that match your requirements.

" + }, + "StartTimeAfter":{ + "shape":"Timestamp", + "documentation":"

A filter to search for training plan offerings with a start time after a specified date.

" + }, + "EndTimeBefore":{ + "shape":"Timestamp", + "documentation":"

A filter to search for reserved capacity offerings with an end time before a specified date.

" + }, + "DurationHours":{ + "shape":"TrainingPlanDurationHoursInput", + "documentation":"

The desired duration in hours for the training plan offerings.

" + }, + "TargetResources":{ + "shape":"SageMakerResourceNames", + "documentation":"

The target resources (e.g., SageMaker Training Jobs, SageMaker HyperPod) to search for in the offerings.

Training plans are specific to their target resource.

" + } + } + }, + "SearchTrainingPlanOfferingsResponse":{ + "type":"structure", + "required":["TrainingPlanOfferings"], + "members":{ + "TrainingPlanOfferings":{ + "shape":"TrainingPlanOfferings", + "documentation":"

A list of training plan offerings that match the search criteria.

" + } + } + }, "SecondaryStatus":{ "type":"string", "enum":[ @@ -34893,6 +36640,14 @@ "Status" ] }, + "SortClusterSchedulerConfigBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status" + ] + }, "SortContextsBy":{ "type":"string", "enum":[ @@ -34943,6 +36698,15 @@ "CreationTime" ] }, + "SortQuotaBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status", + "ClusterArn" + ] + }, "SortTrackingServerBy":{ "type":"string", "enum":[ @@ -35700,6 +37464,10 @@ "min":1, "pattern":".+" }, + "String2048":{ + "type":"string", + "max":2048 + }, "String256":{ "type":"string", "max":256 @@ -36424,6 +38192,10 @@ "max":256, "min":1 }, + "TotalInstanceCount":{ + "type":"integer", + "min":0 + }, "TrackingServerArn":{ "type":"string", "max":2048, @@ -36673,6 +38445,7 @@ "ml.p4de.24xlarge", "ml.p5.48xlarge", "ml.p5e.48xlarge", + "ml.p5en.48xlarge", "ml.c5.xlarge", "ml.c5.2xlarge", "ml.c5.4xlarge", @@ -36710,6 +38483,7 @@ "ml.trn1.2xlarge", "ml.trn1.32xlarge", "ml.trn1n.32xlarge", + "ml.trn2.48xlarge", "ml.m6i.large", "ml.m6i.xlarge", "ml.m6i.2xlarge", @@ -37051,10 +38825,235 @@ "WarmPoolStatus":{ "shape":"WarmPoolStatus", "documentation":"

The status of the warm pool associated with the training job.

" + }, + "TrainingPlanArn":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the training plan associated with this training job.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" } }, "documentation":"

Provides summary information about a training job.

" }, + "TrainingPlanArn":{ + "type":"string", + "max":2048, + "min":50, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:training-plan/.*" + }, + "TrainingPlanArns":{ + "type":"list", + "member":{"shape":"TrainingPlanArn"} + }, + "TrainingPlanDurationHours":{ + "type":"long", + "max":87600, + "min":0 + }, + "TrainingPlanDurationHoursInput":{ + "type":"long", + "max":87600, + "min":1 + }, + "TrainingPlanDurationMinutes":{ + "type":"long", + "max":59, + "min":0 + }, + "TrainingPlanFilter":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"TrainingPlanFilterName", + "documentation":"

The name of the filter field (e.g., Status, InstanceType).

" + }, + "Value":{ + "shape":"String64", + "documentation":"

The value to filter by for the specified field.

" + } + }, + "documentation":"

A filter to apply when listing or searching for training plans.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" + }, + "TrainingPlanFilterName":{ + "type":"string", + "enum":["Status"] + }, + "TrainingPlanFilters":{ + "type":"list", + "member":{"shape":"TrainingPlanFilter"}, + "max":5, + "min":1 + }, + "TrainingPlanName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" + }, + "TrainingPlanOffering":{ + "type":"structure", + "required":[ + "TrainingPlanOfferingId", + "TargetResources" + ], + "members":{ + "TrainingPlanOfferingId":{ + "shape":"TrainingPlanOfferingId", + "documentation":"

The unique identifier for this training plan offering.

" + }, + "TargetResources":{ + "shape":"SageMakerResourceNames", + "documentation":"

The target resources (e.g., SageMaker Training Jobs, SageMaker HyperPod) for this training plan offering.

Training plans are specific to their target resource.

" + }, + "RequestedStartTimeAfter":{ + "shape":"Timestamp", + "documentation":"

The requested start time that the user specified when searching for the training plan offering.

" + }, + "RequestedEndTimeBefore":{ + "shape":"Timestamp", + "documentation":"

The requested end time that the user specified when searching for the training plan offering.

" + }, + "DurationHours":{ + "shape":"TrainingPlanDurationHours", + "documentation":"

The number of whole hours in the total duration for this training plan offering.

" + }, + "DurationMinutes":{ + "shape":"TrainingPlanDurationMinutes", + "documentation":"

The additional minutes beyond whole hours in the total duration for this training plan offering.

" + }, + "UpfrontFee":{ + "shape":"String256", + "documentation":"

The upfront fee for this training plan offering.

" + }, + "CurrencyCode":{ + "shape":"CurrencyCode", + "documentation":"

The currency code for the upfront fee (e.g., USD).

" + }, + "ReservedCapacityOfferings":{ + "shape":"ReservedCapacityOfferings", + "documentation":"

A list of reserved capacity offerings associated with this training plan offering.

" + } + }, + "documentation":"

Details about a training plan offering.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" + }, + "TrainingPlanOfferingId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-z0-9\\-]+$" + }, + "TrainingPlanOfferings":{ + "type":"list", + "member":{"shape":"TrainingPlanOffering"}, + "min":0 + }, + "TrainingPlanSortBy":{ + "type":"string", + "enum":[ + "TrainingPlanName", + "StartTime", + "Status" + ] + }, + "TrainingPlanSortOrder":{ + "type":"string", + "enum":[ + "Ascending", + "Descending" + ] + }, + "TrainingPlanStatus":{ + "type":"string", + "enum":[ + "Pending", + "Active", + "Scheduled", + "Expired", + "Failed" + ] + }, + "TrainingPlanStatusMessage":{ + "type":"string", + "max":1024, + "min":0 + }, + "TrainingPlanSummaries":{ + "type":"list", + "member":{"shape":"TrainingPlanSummary"} + }, + "TrainingPlanSummary":{ + "type":"structure", + "required":[ + "TrainingPlanArn", + "TrainingPlanName", + "Status" + ], + "members":{ + "TrainingPlanArn":{ + "shape":"TrainingPlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the training plan.

" + }, + "TrainingPlanName":{ + "shape":"TrainingPlanName", + "documentation":"

The name of the training plan.

" + }, + "Status":{ + "shape":"TrainingPlanStatus", + "documentation":"

The current status of the training plan (e.g., Pending, Active, Expired). To see the complete list of status values available for a training plan, refer to the Status attribute within the TrainingPlanSummary object.

" + }, + "StatusMessage":{ + "shape":"TrainingPlanStatusMessage", + "documentation":"

A message providing additional information about the current status of the training plan.

" + }, + "DurationHours":{ + "shape":"TrainingPlanDurationHours", + "documentation":"

The number of whole hours in the total duration for this training plan.

" + }, + "DurationMinutes":{ + "shape":"TrainingPlanDurationMinutes", + "documentation":"

The additional minutes beyond whole hours in the total duration for this training plan.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the training plan.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the training plan.

" + }, + "UpfrontFee":{ + "shape":"String256", + "documentation":"

The upfront fee for the training plan.

" + }, + "CurrencyCode":{ + "shape":"CurrencyCode", + "documentation":"

The currency code for the upfront fee (e.g., USD).

" + }, + "TotalInstanceCount":{ + "shape":"TotalInstanceCount", + "documentation":"

The total number of instances reserved in this training plan.

" + }, + "AvailableInstanceCount":{ + "shape":"AvailableInstanceCount", + "documentation":"

The number of instances currently available for use in this training plan.

" + }, + "InUseInstanceCount":{ + "shape":"InUseInstanceCount", + "documentation":"

The number of instances currently in use from this training plan.

" + }, + "TargetResources":{ + "shape":"SageMakerResourceNames", + "documentation":"

The target resources (e.g., training jobs, HyperPod clusters) that can use this training plan.

Training plans are specific to their target resource.

" + }, + "ReservedCapacitySummaries":{ + "shape":"ReservedCapacitySummaries", + "documentation":"

A list of reserved capacities associated with this training plan, including details such as instance types, counts, and availability zones.

" + } + }, + "documentation":"

Details of the training plan.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.

" + }, "TrainingRepositoryAccessMode":{ "type":"string", "enum":[ @@ -38237,6 +40236,48 @@ } } }, + "UpdateClusterSchedulerConfigRequest":{ + "type":"structure", + "required":[ + "ClusterSchedulerConfigId", + "TargetVersion" + ], + "members":{ + "ClusterSchedulerConfigId":{ + "shape":"ClusterSchedulerConfigId", + "documentation":"

ID of the cluster policy.

" + }, + "TargetVersion":{ + "shape":"Integer", + "documentation":"

Target version.

" + }, + "SchedulerConfig":{ + "shape":"SchedulerConfig", + "documentation":"

Cluster policy configuration.

" + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

Description of the cluster policy.

" + } + } + }, + "UpdateClusterSchedulerConfigResponse":{ + "type":"structure", + "required":[ + "ClusterSchedulerConfigArn", + "ClusterSchedulerConfigVersion" + ], + "members":{ + "ClusterSchedulerConfigArn":{ + "shape":"ClusterSchedulerConfigArn", + "documentation":"

ARN of the cluster policy.

" + }, + "ClusterSchedulerConfigVersion":{ + "shape":"Integer", + "documentation":"

Version of the cluster policy.

" + } + } + }, "UpdateClusterSoftwareRequest":{ "type":"structure", "required":["ClusterName"], @@ -38281,6 +40322,56 @@ } } }, + "UpdateComputeQuotaRequest":{ + "type":"structure", + "required":[ + "ComputeQuotaId", + "TargetVersion" + ], + "members":{ + "ComputeQuotaId":{ + "shape":"ComputeQuotaId", + "documentation":"

ID of the compute allocation definition.

" + }, + "TargetVersion":{ + "shape":"Integer", + "documentation":"

Target version.

" + }, + "ComputeQuotaConfig":{ + "shape":"ComputeQuotaConfig", + "documentation":"

Configuration of the compute allocation definition. This includes the resource sharing option and the setting to preempt low-priority tasks.

" + }, + "ComputeQuotaTarget":{ + "shape":"ComputeQuotaTarget", + "documentation":"

The target entity to allocate compute resources to.

" + }, + "ActivationState":{ + "shape":"ActivationState", + "documentation":"

The state of the compute allocation being described. Use it to enable or disable compute allocation.

Default is Enabled.

" + }, + "Description":{ + "shape":"EntityDescription", + "documentation":"

Description of the compute allocation definition.

" + } + } + }, + "UpdateComputeQuotaResponse":{ + "type":"structure", + "required":[ + "ComputeQuotaArn", + "ComputeQuotaVersion" + ], + "members":{ + "ComputeQuotaArn":{ + "shape":"ComputeQuotaArn", + "documentation":"

ARN of the compute allocation definition.

" + }, + "ComputeQuotaVersion":{ + "shape":"Integer", + "documentation":"

Version of the compute allocation definition.

" + } + } + }, "UpdateContextRequest":{ "type":"structure", "required":["ContextName"], @@ -39066,6 +41157,51 @@ "members":{ } }, + "UpdatePartnerAppRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App to update.

" + }, + "MaintenanceConfig":{ + "shape":"PartnerAppMaintenanceConfig", + "documentation":"

Maintenance configuration settings for the SageMaker Partner AI App.

" + }, + "Tier":{ + "shape":"NonEmptyString64", + "documentation":"

Indicates the instance type and size of the cluster attached to the SageMaker Partner AI App.

" + }, + "ApplicationConfig":{ + "shape":"PartnerAppConfig", + "documentation":"

Configuration settings for the SageMaker Partner AI App.

" + }, + "EnableIamSessionBasedIdentity":{ + "shape":"Boolean", + "documentation":"

When set to TRUE, the SageMaker Partner AI App sets the Amazon Web Services IAM session name or the authenticated IAM user as the identity of the SageMaker Partner AI App user.

", + "box":true + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique token that guarantees that the call to this API is idempotent.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

" + } + } + }, + "UpdatePartnerAppResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"PartnerAppArn", + "documentation":"

The ARN of the SageMaker Partner AI App that was updated.

" + } + } + }, "UpdatePipelineExecutionRequest":{ "type":"structure", "required":["PipelineExecutionArn"], @@ -39820,6 +41956,11 @@ "max":9, "pattern":"(Mon|Tue|Wed|Thu|Fri|Sat|Sun):([01]\\d|2[0-3]):([0-5]\\d)" }, + "WeeklyScheduleTimeFormat":{ + "type":"string", + "max":9, + "pattern":"(Mon|Tue|Wed|Thu|Fri|Sat|Sun):([01]\\d|2[0-3]):([0-5]\\d)" + }, "WorkerAccessConfiguration":{ "type":"structure", "members":{ diff --git a/tests/functional/endpoint-rules/bedrock-data-automation-runtime/endpoint-tests-1.json b/tests/functional/endpoint-rules/bedrock-data-automation-runtime/endpoint-tests-1.json new file mode 100644 index 0000000000..4c05a7b7fa --- /dev/null +++ b/tests/functional/endpoint-rules/bedrock-data-automation-runtime/endpoint-tests-1.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with 
FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-runtime.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": 
"https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/endpoint-rules/bedrock-data-automation/endpoint-tests-1.json b/tests/functional/endpoint-rules/bedrock-data-automation/endpoint-tests-1.json new file mode 100644 index 0000000000..914a5460a4 --- /dev/null +++ b/tests/functional/endpoint-rules/bedrock-data-automation/endpoint-tests-1.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region 
cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region 
us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://bedrock-data-automation.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file From 354fe4ff4d58abd277e778358e00f049c42fa49a Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 4 Dec 2024 19:09:49 +0000 Subject: [PATCH 2/2] Bumping version to 1.35.75 --- .changes/1.35.75.json | 42 +++++++++++++++++++ .../api-change-bedrock-13860.json | 5 --- .../api-change-bedrockagent-72999.json | 5 --- .../api-change-bedrockagentruntime-2027.json | 5 --- ...pi-change-bedrockdataautomation-77172.json | 5 --- ...ge-bedrockdataautomationruntime-71717.json | 5 --- .../api-change-bedrockruntime-67114.json | 5 --- .../next-release/api-change-kendra-74397.json | 5 --- .../api-change-sagemaker-9454.json | 5 --- CHANGELOG.rst | 13 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 12 files changed, 57 insertions(+), 42 deletions(-) create mode 100644 .changes/1.35.75.json delete mode 100644 .changes/next-release/api-change-bedrock-13860.json delete mode 100644 .changes/next-release/api-change-bedrockagent-72999.json delete mode 100644 .changes/next-release/api-change-bedrockagentruntime-2027.json delete mode 100644 .changes/next-release/api-change-bedrockdataautomation-77172.json delete mode 100644 .changes/next-release/api-change-bedrockdataautomationruntime-71717.json delete mode 100644 .changes/next-release/api-change-bedrockruntime-67114.json delete mode 100644 .changes/next-release/api-change-kendra-74397.json delete mode 100644 .changes/next-release/api-change-sagemaker-9454.json diff --git a/.changes/1.35.75.json b/.changes/1.35.75.json new file mode 100644 index 0000000000..9d5a3c902a --- /dev/null +++ b/.changes/1.35.75.json @@ -0,0 +1,42 @@ +[ + { + "category": "``bedrock``", + "description": "Introduced two APIs ListPromptRouters and GetPromptRouter for Intelligent Prompt Router feature. Add support for Bedrock Guardrails image content filter. 
New Bedrock Marketplace feature enabling a wider range of bedrock compatible models with self-hosted capability.", + "type": "api-change" + }, + { + "category": "``bedrock-agent``", + "description": "This release introduces the ability to generate SQL using natural language, through a new GenerateQuery API (with native integration into Knowledge Bases); ability to ingest and retrieve images through Bedrock Data Automation; and ability to create a Knowledge Base backed by Kendra GenAI Index.", + "type": "api-change" + }, + { + "category": "``bedrock-agent-runtime``", + "description": "This release introduces the ability to generate SQL using natural language, through a new GenerateQuery API (with native integration into Knowledge Bases); ability to ingest and retrieve images through Bedrock Data Automation; and ability to create a Knowledge Base backed by Kendra GenAI Index.", + "type": "api-change" + }, + { + "category": "``bedrock-data-automation``", + "description": "Release Bedrock Data Automation SDK", + "type": "api-change" + }, + { + "category": "``bedrock-data-automation-runtime``", + "description": "Release Bedrock Data Automation Runtime SDK", + "type": "api-change" + }, + { + "category": "``bedrock-runtime``", + "description": "Added support for Intelligent Prompt Router in Invoke, InvokeStream, Converse and ConverseStream. Add support for Bedrock Guardrails image content filter. New Bedrock Marketplace feature enabling a wider range of bedrock compatible models with self-hosted capability.", + "type": "api-change" + }, + { + "category": "``kendra``", + "description": "This release adds GenAI Index in Amazon Kendra for Retrieval Augmented Generation (RAG) and intelligent search. With the Kendra GenAI Index, customers get high retrieval accuracy powered by the latest information retrieval technologies and semantic models.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Amazon SageMaker HyperPod launched task governance to help customers maximize accelerator utilization for model development and flexible training plans to meet training timelines and budget while reducing weeks of training time. AI apps from AWS partner is now available in SageMaker.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-bedrock-13860.json b/.changes/next-release/api-change-bedrock-13860.json deleted file mode 100644 index df00fbdd57..0000000000 --- a/.changes/next-release/api-change-bedrock-13860.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock``", - "description": "Introduced two APIs ListPromptRouters and GetPromptRouter for Intelligent Prompt Router feature. Add support for Bedrock Guardrails image content filter. New Bedrock Marketplace feature enabling a wider range of bedrock compatible models with self-hosted capability." -} diff --git a/.changes/next-release/api-change-bedrockagent-72999.json b/.changes/next-release/api-change-bedrockagent-72999.json deleted file mode 100644 index 2ef20a639f..0000000000 --- a/.changes/next-release/api-change-bedrockagent-72999.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-agent``", - "description": "This release introduces the ability to generate SQL using natural language, through a new GenerateQuery API (with native integration into Knowledge Bases); ability to ingest and retrieve images through Bedrock Data Automation; and ability to create a Knowledge Base backed by Kendra GenAI Index." 
-} diff --git a/.changes/next-release/api-change-bedrockagentruntime-2027.json b/.changes/next-release/api-change-bedrockagentruntime-2027.json deleted file mode 100644 index 6a411146bf..0000000000 --- a/.changes/next-release/api-change-bedrockagentruntime-2027.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-agent-runtime``", - "description": "This release introduces the ability to generate SQL using natural language, through a new GenerateQuery API (with native integration into Knowledge Bases); ability to ingest and retrieve images through Bedrock Data Automation; and ability to create a Knowledge Base backed by Kendra GenAI Index." -} diff --git a/.changes/next-release/api-change-bedrockdataautomation-77172.json b/.changes/next-release/api-change-bedrockdataautomation-77172.json deleted file mode 100644 index 5b33934a7a..0000000000 --- a/.changes/next-release/api-change-bedrockdataautomation-77172.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-data-automation``", - "description": "Release Bedrock Data Automation SDK" -} diff --git a/.changes/next-release/api-change-bedrockdataautomationruntime-71717.json b/.changes/next-release/api-change-bedrockdataautomationruntime-71717.json deleted file mode 100644 index 5afd2a98ff..0000000000 --- a/.changes/next-release/api-change-bedrockdataautomationruntime-71717.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-data-automation-runtime``", - "description": "Release Bedrock Data Automation Runtime SDK" -} diff --git a/.changes/next-release/api-change-bedrockruntime-67114.json b/.changes/next-release/api-change-bedrockruntime-67114.json deleted file mode 100644 index e1d8878da8..0000000000 --- a/.changes/next-release/api-change-bedrockruntime-67114.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-runtime``", - "description": "Added support for Intelligent Prompt Router in Invoke, InvokeStream, Converse and ConverseStream. Add support for Bedrock Guardrails image content filter. New Bedrock Marketplace feature enabling a wider range of bedrock compatible models with self-hosted capability." -} diff --git a/.changes/next-release/api-change-kendra-74397.json b/.changes/next-release/api-change-kendra-74397.json deleted file mode 100644 index 5ba84fe10e..0000000000 --- a/.changes/next-release/api-change-kendra-74397.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``kendra``", - "description": "This release adds GenAI Index in Amazon Kendra for Retrieval Augmented Generation (RAG) and intelligent search. With the Kendra GenAI Index, customers get high retrieval accuracy powered by the latest information retrieval technologies and semantic models." -} diff --git a/.changes/next-release/api-change-sagemaker-9454.json b/.changes/next-release/api-change-sagemaker-9454.json deleted file mode 100644 index 494d2670f5..0000000000 --- a/.changes/next-release/api-change-sagemaker-9454.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``sagemaker``", - "description": "Amazon SageMaker HyperPod launched task governance to help customers maximize accelerator utilization for model development and flexible training plans to meet training timelines and budget while reducing weeks of training time. AI apps from AWS partner is now available in SageMaker." 
-} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3837dbc847..0cd7e081d3 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,19 @@ CHANGELOG ========= +1.35.75 +======= + +* api-change:``bedrock``: Introduced two APIs ListPromptRouters and GetPromptRouter for Intelligent Prompt Router feature. Add support for Bedrock Guardrails image content filter. New Bedrock Marketplace feature enabling a wider range of bedrock compatible models with self-hosted capability. +* api-change:``bedrock-agent``: This release introduces the ability to generate SQL using natural language, through a new GenerateQuery API (with native integration into Knowledge Bases); ability to ingest and retrieve images through Bedrock Data Automation; and ability to create a Knowledge Base backed by Kendra GenAI Index. +* api-change:``bedrock-agent-runtime``: This release introduces the ability to generate SQL using natural language, through a new GenerateQuery API (with native integration into Knowledge Bases); ability to ingest and retrieve images through Bedrock Data Automation; and ability to create a Knowledge Base backed by Kendra GenAI Index. +* api-change:``bedrock-data-automation``: Release Bedrock Data Automation SDK +* api-change:``bedrock-data-automation-runtime``: Release Bedrock Data Automation Runtime SDK +* api-change:``bedrock-runtime``: Added support for Intelligent Prompt Router in Invoke, InvokeStream, Converse and ConverseStream. Add support for Bedrock Guardrails image content filter. New Bedrock Marketplace feature enabling a wider range of bedrock compatible models with self-hosted capability. +* api-change:``kendra``: This release adds GenAI Index in Amazon Kendra for Retrieval Augmented Generation (RAG) and intelligent search. With the Kendra GenAI Index, customers get high retrieval accuracy powered by the latest information retrieval technologies and semantic models. +* api-change:``sagemaker``: Amazon SageMaker HyperPod launched task governance to help customers maximize accelerator utilization for model development and flexible training plans to meet training timelines and budget while reducing weeks of training time. AI apps from AWS partner is now available in SageMaker. + + 1.35.74 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 9ff18766b6..7ce27bb384 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.74' +__version__ = '1.35.75' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index fd6ba917d3..9bb5daf6c4 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.74' +release = '1.35.75' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.
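
With 1.35.75 installed, the bumped models expose the new operations described in the changelog entries above. The sketch below is illustrative only and not part of the patch: client names are assumed to follow the new data directories (``bedrock-data-automation``, ``bedrock-data-automation-runtime``), ``list_prompt_routers`` is the usual snake_case mapping of the ``ListPromptRouters`` operation named in the ``bedrock`` entry, and request/response shapes should be read from the generated ``service-2.json`` files rather than inferred from this example::

    import botocore.session

    session = botocore.session.get_session()

    # New Intelligent Prompt Router operation on the existing bedrock client;
    # calling it requires AWS credentials with the matching permissions.
    bedrock = session.create_client('bedrock', region_name='us-east-1')
    routers = bedrock.list_prompt_routers()
    print(routers)

    # The two services introduced by this release. Listing their operation
    # names works offline against the bundled models, no credentials needed.
    bda = session.create_client('bedrock-data-automation', region_name='us-east-1')
    bda_runtime = session.create_client(
        'bedrock-data-automation-runtime', region_name='us-east-1'
    )
    print(sorted(bda.meta.service_model.operation_names))
    print(sorted(bda_runtime.meta.service_model.operation_names))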
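
The endpoint test cases added for ``bedrock-data-automation`` pin down how the new ruleset resolves under FIPS, dualstack, and custom-endpoint settings. A minimal sketch of driving the same resolution from client configuration follows; the service name is again assumed from the data directory, while ``use_fips_endpoint``, ``use_dualstack_endpoint``, and ``endpoint_url`` are existing botocore options::

    import botocore.session
    from botocore.config import Config

    session = botocore.session.get_session()

    # FIPS on, dualstack off in aws-us-gov; the corresponding test case expects
    # https://bedrock-data-automation-fips.us-gov-east-1.amazonaws.com
    fips_client = session.create_client(
        'bedrock-data-automation',
        region_name='us-gov-east-1',
        config=Config(use_fips_endpoint=True, use_dualstack_endpoint=False),
    )
    print(fips_client.meta.endpoint_url)

    # A custom endpoint short-circuits resolution (expected: https://example.com);
    # combining it with FIPS or dualstack is rejected with the
    # "Invalid Configuration" errors listed in the custom-endpoint cases.
    custom_client = session.create_client(
        'bedrock-data-automation',
        region_name='us-east-1',
        endpoint_url='https://example.com',
    )
    print(custom_client.meta.endpoint_url)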