diff --git a/clients/client-bedrock/src/commands/CreateGuardrailCommand.ts b/clients/client-bedrock/src/commands/CreateGuardrailCommand.ts index 5e5eb37797b6..615c9c66ce38 100644 --- a/clients/client-bedrock/src/commands/CreateGuardrailCommand.ts +++ b/clients/client-bedrock/src/commands/CreateGuardrailCommand.ts @@ -62,7 +62,7 @@ export interface CreateGuardrailCommandOutput extends CreateGuardrailResponse, _ * *

In addition to the above policies, you can also configure the messages to be returned to * the user if a user input or model response is in violation of the policies defined in the guardrail.

- *

For more information, see Guardrails for Amazon Bedrock in + *

For more information, see Amazon Bedrock Guardrails in * the Amazon Bedrock User Guide.

* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-bedrock/src/commands/CreateModelCustomizationJobCommand.ts b/clients/client-bedrock/src/commands/CreateModelCustomizationJobCommand.ts index 20e65e03bc3b..29d23a087968 100644 --- a/clients/client-bedrock/src/commands/CreateModelCustomizationJobCommand.ts +++ b/clients/client-bedrock/src/commands/CreateModelCustomizationJobCommand.ts @@ -6,7 +6,11 @@ import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { BedrockClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BedrockClient"; import { commonParams } from "../endpoint/EndpointParameters"; -import { CreateModelCustomizationJobRequest, CreateModelCustomizationJobResponse } from "../models/models_0"; +import { + CreateModelCustomizationJobRequest, + CreateModelCustomizationJobRequestFilterSensitiveLog, + CreateModelCustomizationJobResponse, +} from "../models/models_0"; import { de_CreateModelCustomizationJobCommand, se_CreateModelCustomizationJobCommand, @@ -54,7 +58,7 @@ export interface CreateModelCustomizationJobCommandOutput * roleArn: "STRING_VALUE", // required * clientRequestToken: "STRING_VALUE", * baseModelIdentifier: "STRING_VALUE", // required - * customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING", + * customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING" || "DISTILLATION", * customModelKmsKeyId: "STRING_VALUE", * jobTags: [ // TagList * { // Tag @@ -69,7 +73,39 @@ export interface CreateModelCustomizationJobCommandOutput * }, * ], * trainingDataConfig: { // TrainingDataConfig - * s3Uri: "STRING_VALUE", // required + * s3Uri: "STRING_VALUE", + * invocationLogsConfig: { // InvocationLogsConfig + * usePromptResponse: true || false, + * invocationLogSource: { // InvocationLogSource Union: only one key present + * s3Uri: "STRING_VALUE", + * }, + * requestMetadataFilters: { // RequestMetadataFilters Union: only one key present + * equals: { // 
RequestMetadataMap + * "": "STRING_VALUE", + * }, + * notEquals: { + * "": "STRING_VALUE", + * }, + * andAll: [ // RequestMetadataFiltersList + * { // RequestMetadataBaseFilters + * equals: { + * "": "STRING_VALUE", + * }, + * notEquals: { + * "": "STRING_VALUE", + * }, + * }, + * ], + * orAll: [ + * { + * equals: { + * "": "STRING_VALUE", + * }, + * notEquals: "", + * }, + * ], + * }, + * }, * }, * validationDataConfig: { // ValidationDataConfig * validators: [ // Validators // required @@ -81,7 +117,7 @@ export interface CreateModelCustomizationJobCommandOutput * outputDataConfig: { // OutputDataConfig * s3Uri: "STRING_VALUE", // required * }, - * hyperParameters: { // ModelCustomizationHyperParameters // required + * hyperParameters: { // ModelCustomizationHyperParameters * "": "STRING_VALUE", * }, * vpcConfig: { // VpcConfig @@ -92,6 +128,14 @@ export interface CreateModelCustomizationJobCommandOutput * "STRING_VALUE", * ], * }, + * customizationConfig: { // CustomizationConfig Union: only one key present + * distillationConfig: { // DistillationConfig + * teacherModelConfig: { // TeacherModelConfig + * teacherModelIdentifier: "STRING_VALUE", // required + * maxResponseLengthForInference: Number("int"), + * }, + * }, + * }, * }; * const command = new CreateModelCustomizationJobCommand(input); * const response = await client.send(command); @@ -154,7 +198,7 @@ export class CreateModelCustomizationJobCommand extends $Command }) .s("AmazonBedrockControlPlaneService", "CreateModelCustomizationJob", {}) .n("BedrockClient", "CreateModelCustomizationJobCommand") - .f(void 0, void 0) + .f(CreateModelCustomizationJobRequestFilterSensitiveLog, void 0) .ser(se_CreateModelCustomizationJobCommand) .de(de_CreateModelCustomizationJobCommand) .build() { diff --git a/clients/client-bedrock/src/commands/GetCustomModelCommand.ts b/clients/client-bedrock/src/commands/GetCustomModelCommand.ts index 328abb8b14c0..b144a1e2f41c 100644 --- 
a/clients/client-bedrock/src/commands/GetCustomModelCommand.ts +++ b/clients/client-bedrock/src/commands/GetCustomModelCommand.ts @@ -6,7 +6,11 @@ import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { BedrockClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BedrockClient"; import { commonParams } from "../endpoint/EndpointParameters"; -import { GetCustomModelRequest, GetCustomModelResponse } from "../models/models_0"; +import { + GetCustomModelRequest, + GetCustomModelResponse, + GetCustomModelResponseFilterSensitiveLog, +} from "../models/models_0"; import { de_GetCustomModelCommand, se_GetCustomModelCommand } from "../protocols/Aws_restJson1"; /** @@ -46,13 +50,45 @@ export interface GetCustomModelCommandOutput extends GetCustomModelResponse, __M * // jobName: "STRING_VALUE", * // jobArn: "STRING_VALUE", // required * // baseModelArn: "STRING_VALUE", // required - * // customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING", + * // customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING" || "DISTILLATION", * // modelKmsKeyArn: "STRING_VALUE", * // hyperParameters: { // ModelCustomizationHyperParameters * // "": "STRING_VALUE", * // }, * // trainingDataConfig: { // TrainingDataConfig - * // s3Uri: "STRING_VALUE", // required + * // s3Uri: "STRING_VALUE", + * // invocationLogsConfig: { // InvocationLogsConfig + * // usePromptResponse: true || false, + * // invocationLogSource: { // InvocationLogSource Union: only one key present + * // s3Uri: "STRING_VALUE", + * // }, + * // requestMetadataFilters: { // RequestMetadataFilters Union: only one key present + * // equals: { // RequestMetadataMap + * // "": "STRING_VALUE", + * // }, + * // notEquals: { + * // "": "STRING_VALUE", + * // }, + * // andAll: [ // RequestMetadataFiltersList + * // { // RequestMetadataBaseFilters + * // equals: { + * // "": "STRING_VALUE", + * // }, + * // notEquals: { + * // "": "STRING_VALUE", + * // }, + * // }, + * // ], + * // orAll: [ 
+ * // { + * // equals: { + * // "": "STRING_VALUE", + * // }, + * // notEquals: "", + * // }, + * // ], + * // }, + * // }, * // }, * // validationDataConfig: { // ValidationDataConfig * // validators: [ // Validators // required @@ -73,6 +109,14 @@ export interface GetCustomModelCommandOutput extends GetCustomModelResponse, __M * // }, * // ], * // creationTime: new Date("TIMESTAMP"), // required + * // customizationConfig: { // CustomizationConfig Union: only one key present + * // distillationConfig: { // DistillationConfig + * // teacherModelConfig: { // TeacherModelConfig + * // teacherModelIdentifier: "STRING_VALUE", // required + * // maxResponseLengthForInference: Number("int"), + * // }, + * // }, + * // }, * // }; * * ``` @@ -120,7 +164,7 @@ export class GetCustomModelCommand extends $Command }) .s("AmazonBedrockControlPlaneService", "GetCustomModel", {}) .n("BedrockClient", "GetCustomModelCommand") - .f(void 0, void 0) + .f(void 0, GetCustomModelResponseFilterSensitiveLog) .ser(se_GetCustomModelCommand) .de(de_GetCustomModelCommand) .build() { diff --git a/clients/client-bedrock/src/commands/GetFoundationModelCommand.ts b/clients/client-bedrock/src/commands/GetFoundationModelCommand.ts index c32ecc83a672..c239345337c6 100644 --- a/clients/client-bedrock/src/commands/GetFoundationModelCommand.ts +++ b/clients/client-bedrock/src/commands/GetFoundationModelCommand.ts @@ -54,7 +54,7 @@ export interface GetFoundationModelCommandOutput extends GetFoundationModelRespo * // ], * // responseStreamingSupported: true || false, * // customizationsSupported: [ // ModelCustomizationList - * // "FINE_TUNING" || "CONTINUED_PRE_TRAINING", + * // "FINE_TUNING" || "CONTINUED_PRE_TRAINING" || "DISTILLATION", * // ], * // inferenceTypesSupported: [ // InferenceTypeList * // "ON_DEMAND" || "PROVISIONED", diff --git a/clients/client-bedrock/src/commands/GetModelCustomizationJobCommand.ts b/clients/client-bedrock/src/commands/GetModelCustomizationJobCommand.ts index 
06e9debab0b9..9642147b280b 100644 --- a/clients/client-bedrock/src/commands/GetModelCustomizationJobCommand.ts +++ b/clients/client-bedrock/src/commands/GetModelCustomizationJobCommand.ts @@ -6,7 +6,11 @@ import { MetadataBearer as __MetadataBearer } from "@smithy/types"; import { BedrockClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BedrockClient"; import { commonParams } from "../endpoint/EndpointParameters"; -import { GetModelCustomizationJobRequest, GetModelCustomizationJobResponse } from "../models/models_0"; +import { + GetModelCustomizationJobRequest, + GetModelCustomizationJobResponse, + GetModelCustomizationJobResponseFilterSensitiveLog, +} from "../models/models_0"; import { de_GetModelCustomizationJobCommand, se_GetModelCustomizationJobCommand } from "../protocols/Aws_restJson1"; /** @@ -54,11 +58,43 @@ export interface GetModelCustomizationJobCommandOutput extends GetModelCustomiza * // lastModifiedTime: new Date("TIMESTAMP"), * // endTime: new Date("TIMESTAMP"), * // baseModelArn: "STRING_VALUE", // required - * // hyperParameters: { // ModelCustomizationHyperParameters // required + * // hyperParameters: { // ModelCustomizationHyperParameters * // "": "STRING_VALUE", * // }, * // trainingDataConfig: { // TrainingDataConfig - * // s3Uri: "STRING_VALUE", // required + * // s3Uri: "STRING_VALUE", + * // invocationLogsConfig: { // InvocationLogsConfig + * // usePromptResponse: true || false, + * // invocationLogSource: { // InvocationLogSource Union: only one key present + * // s3Uri: "STRING_VALUE", + * // }, + * // requestMetadataFilters: { // RequestMetadataFilters Union: only one key present + * // equals: { // RequestMetadataMap + * // "": "STRING_VALUE", + * // }, + * // notEquals: { + * // "": "STRING_VALUE", + * // }, + * // andAll: [ // RequestMetadataFiltersList + * // { // RequestMetadataBaseFilters + * // equals: { + * // "": "STRING_VALUE", + * // }, + * // notEquals: { + * // "": "STRING_VALUE", + * // }, + * // }, + 
* // ], + * // orAll: [ + * // { + * // equals: { + * // "": "STRING_VALUE", + * // }, + * // notEquals: "", + * // }, + * // ], + * // }, + * // }, * // }, * // validationDataConfig: { // ValidationDataConfig * // validators: [ // Validators // required @@ -70,7 +106,7 @@ export interface GetModelCustomizationJobCommandOutput extends GetModelCustomiza * // outputDataConfig: { // OutputDataConfig * // s3Uri: "STRING_VALUE", // required * // }, - * // customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING", + * // customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING" || "DISTILLATION", * // outputModelKmsKeyArn: "STRING_VALUE", * // trainingMetrics: { // TrainingMetrics * // trainingLoss: Number("float"), @@ -88,6 +124,14 @@ export interface GetModelCustomizationJobCommandOutput extends GetModelCustomiza * // "STRING_VALUE", * // ], * // }, + * // customizationConfig: { // CustomizationConfig Union: only one key present + * // distillationConfig: { // DistillationConfig + * // teacherModelConfig: { // TeacherModelConfig + * // teacherModelIdentifier: "STRING_VALUE", // required + * // maxResponseLengthForInference: Number("int"), + * // }, + * // }, + * // }, * // }; * * ``` @@ -135,7 +179,7 @@ export class GetModelCustomizationJobCommand extends $Command }) .s("AmazonBedrockControlPlaneService", "GetModelCustomizationJob", {}) .n("BedrockClient", "GetModelCustomizationJobCommand") - .f(void 0, void 0) + .f(void 0, GetModelCustomizationJobResponseFilterSensitiveLog) .ser(se_GetModelCustomizationJobCommand) .de(de_GetModelCustomizationJobCommand) .build() { diff --git a/clients/client-bedrock/src/commands/GetModelInvocationJobCommand.ts b/clients/client-bedrock/src/commands/GetModelInvocationJobCommand.ts index 6dd6120d2300..5d62161fa54d 100644 --- a/clients/client-bedrock/src/commands/GetModelInvocationJobCommand.ts +++ b/clients/client-bedrock/src/commands/GetModelInvocationJobCommand.ts @@ -32,7 +32,7 @@ export interface 
GetModelInvocationJobCommandInput extends GetModelInvocationJob export interface GetModelInvocationJobCommandOutput extends GetModelInvocationJobResponse, __MetadataBearer {} /** - *

Gets details about a batch inference job. For more information, see View details about a batch inference job + *

Gets details about a batch inference job. For more information, see Monitor batch inference jobs *

* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-bedrock/src/commands/GetModelInvocationLoggingConfigurationCommand.ts b/clients/client-bedrock/src/commands/GetModelInvocationLoggingConfigurationCommand.ts index f0d04f93d3ef..d268c021bce7 100644 --- a/clients/client-bedrock/src/commands/GetModelInvocationLoggingConfigurationCommand.ts +++ b/clients/client-bedrock/src/commands/GetModelInvocationLoggingConfigurationCommand.ts @@ -64,6 +64,7 @@ export interface GetModelInvocationLoggingConfigurationCommandOutput * // textDataDeliveryEnabled: true || false, * // imageDataDeliveryEnabled: true || false, * // embeddingDataDeliveryEnabled: true || false, + * // videoDataDeliveryEnabled: true || false, * // }, * // }; * diff --git a/clients/client-bedrock/src/commands/ListCustomModelsCommand.ts b/clients/client-bedrock/src/commands/ListCustomModelsCommand.ts index c5f95c52cf36..ba11e45219cc 100644 --- a/clients/client-bedrock/src/commands/ListCustomModelsCommand.ts +++ b/clients/client-bedrock/src/commands/ListCustomModelsCommand.ts @@ -59,7 +59,7 @@ export interface ListCustomModelsCommandOutput extends ListCustomModelsResponse, * // creationTime: new Date("TIMESTAMP"), // required * // baseModelArn: "STRING_VALUE", // required * // baseModelName: "STRING_VALUE", // required - * // customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING", + * // customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING" || "DISTILLATION", * // ownerAccountId: "STRING_VALUE", * // }, * // ], diff --git a/clients/client-bedrock/src/commands/ListFoundationModelsCommand.ts b/clients/client-bedrock/src/commands/ListFoundationModelsCommand.ts index a76c9f97d571..d31ace787d82 100644 --- a/clients/client-bedrock/src/commands/ListFoundationModelsCommand.ts +++ b/clients/client-bedrock/src/commands/ListFoundationModelsCommand.ts @@ -37,7 +37,7 @@ export interface ListFoundationModelsCommandOutput extends ListFoundationModelsR * 
const client = new BedrockClient(config); * const input = { // ListFoundationModelsRequest * byProvider: "STRING_VALUE", - * byCustomizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING", + * byCustomizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING" || "DISTILLATION", * byOutputModality: "TEXT" || "IMAGE" || "EMBEDDING", * byInferenceType: "ON_DEMAND" || "PROVISIONED", * }; @@ -58,7 +58,7 @@ export interface ListFoundationModelsCommandOutput extends ListFoundationModelsR * // ], * // responseStreamingSupported: true || false, * // customizationsSupported: [ // ModelCustomizationList - * // "FINE_TUNING" || "CONTINUED_PRE_TRAINING", + * // "FINE_TUNING" || "CONTINUED_PRE_TRAINING" || "DISTILLATION", * // ], * // inferenceTypesSupported: [ // InferenceTypeList * // "ON_DEMAND" || "PROVISIONED", diff --git a/clients/client-bedrock/src/commands/ListModelCustomizationJobsCommand.ts b/clients/client-bedrock/src/commands/ListModelCustomizationJobsCommand.ts index c69b8b95a6ce..e948a6b72428 100644 --- a/clients/client-bedrock/src/commands/ListModelCustomizationJobsCommand.ts +++ b/clients/client-bedrock/src/commands/ListModelCustomizationJobsCommand.ts @@ -62,7 +62,7 @@ export interface ListModelCustomizationJobsCommandOutput extends ListModelCustom * // endTime: new Date("TIMESTAMP"), * // customModelArn: "STRING_VALUE", * // customModelName: "STRING_VALUE", - * // customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING", + * // customizationType: "FINE_TUNING" || "CONTINUED_PRE_TRAINING" || "DISTILLATION", * // }, * // ], * // }; diff --git a/clients/client-bedrock/src/commands/ListModelInvocationJobsCommand.ts b/clients/client-bedrock/src/commands/ListModelInvocationJobsCommand.ts index 81ac9a2d2cf9..f6bf4ae03fd8 100644 --- a/clients/client-bedrock/src/commands/ListModelInvocationJobsCommand.ts +++ b/clients/client-bedrock/src/commands/ListModelInvocationJobsCommand.ts @@ -32,7 +32,7 @@ export interface ListModelInvocationJobsCommandInput extends 
ListModelInvocation export interface ListModelInvocationJobsCommandOutput extends ListModelInvocationJobsResponse, __MetadataBearer {} /** - *

Lists all batch inference jobs in the account. For more information, see View details about a batch inference job.

+ *

Lists all batch inference jobs in the account. For more information, see View details about a batch inference job.

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-bedrock/src/commands/PutModelInvocationLoggingConfigurationCommand.ts b/clients/client-bedrock/src/commands/PutModelInvocationLoggingConfigurationCommand.ts index e97f934313d8..55908ecf48b6 100644 --- a/clients/client-bedrock/src/commands/PutModelInvocationLoggingConfigurationCommand.ts +++ b/clients/client-bedrock/src/commands/PutModelInvocationLoggingConfigurationCommand.ts @@ -61,6 +61,7 @@ export interface PutModelInvocationLoggingConfigurationCommandOutput * textDataDeliveryEnabled: true || false, * imageDataDeliveryEnabled: true || false, * embeddingDataDeliveryEnabled: true || false, + * videoDataDeliveryEnabled: true || false, * }, * }; * const command = new PutModelInvocationLoggingConfigurationCommand(input); diff --git a/clients/client-bedrock/src/commands/StopModelInvocationJobCommand.ts b/clients/client-bedrock/src/commands/StopModelInvocationJobCommand.ts index 3510fe1a8070..a4ac645680fd 100644 --- a/clients/client-bedrock/src/commands/StopModelInvocationJobCommand.ts +++ b/clients/client-bedrock/src/commands/StopModelInvocationJobCommand.ts @@ -28,7 +28,7 @@ export interface StopModelInvocationJobCommandInput extends StopModelInvocationJ export interface StopModelInvocationJobCommandOutput extends StopModelInvocationJobResponse, __MetadataBearer {} /** - *

Stops a batch inference job. You're only charged for tokens that were already processed. For more information, see Stop a batch inference job.

+ *

Stops a batch inference job. You're only charged for tokens that were already processed. For more information, see Stop a batch inference job.

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-bedrock/src/models/models_0.ts b/clients/client-bedrock/src/models/models_0.ts index 8fd0391386f6..174ee1d57167 100644 --- a/clients/client-bedrock/src/models/models_0.ts +++ b/clients/client-bedrock/src/models/models_0.ts @@ -3182,6 +3182,12 @@ export interface LoggingConfig { * @public */ embeddingDataDeliveryEnabled?: boolean | undefined; + + /** + *

Set to include video data in the log delivery.

+ * @public + */ + videoDataDeliveryEnabled?: boolean | undefined; } /** @@ -4274,7 +4280,7 @@ export interface CreateModelInvocationJobRequest { outputDataConfig: ModelInvocationJobOutputDataConfig | undefined; /** - *

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

+ *

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

* @public */ vpcConfig?: VpcConfig | undefined; @@ -4373,6 +4379,50 @@ export interface GetModelInvocationJobResponse { /** *

The status of the batch inference job.

+ *

The following statuses are possible:

+ *
    + *
  • + *

    Submitted – This job has been submitted to a queue for validation.

    + *
  • + *
  • + *

    Validating – This job is being validated for the requirements described in Format and upload your batch inference data. The criteria include the following:

    + *
      + *
    • + *

      Your IAM service role has access to the Amazon S3 buckets containing your files.

      + *
    • + *
    • + *

      Your files are .jsonl files and each individual record is a JSON object in the correct format. Note that validation doesn't check if the modelInput value matches the request body for the model.

      + *
    • + *
    • + *

      Your files fulfill the requirements for file size and number of records. For more information, see Quotas for Amazon Bedrock.

      + *
    • + *
    + *
  • + *
  • + *

    Scheduled – This job has been validated and is now in a queue. The job will automatically start when it reaches its turn.

    + *
  • + *
  • + *

    Expired – This job timed out because it was scheduled but didn't begin before the set timeout duration. Submit a new job request.

    + *
  • + *
  • + *

    InProgress – This job has begun. You can start viewing the results in the output S3 location.

    + *
  • + *
  • + *

    Completed – This job has successfully completed. View the output files in the output S3 location.

    + *
  • + *
  • + *

    PartiallyCompleted – This job has partially completed. Not all of your records could be processed in time. View the output files in the output S3 location.

    + *
  • + *
  • + *

    Failed – This job has failed. Check the failure message for any further details. For further assistance, reach out to the Amazon Web Services Support Center.

    + *
  • + *
  • + *

    Stopped – This job was stopped by a user.

    + *
  • + *
  • + *

    Stopping – This job is being stopped by a user.

    + *
  • + *
* @public */ status?: ModelInvocationJobStatus | undefined; @@ -4414,7 +4464,7 @@ export interface GetModelInvocationJobResponse { outputDataConfig: ModelInvocationJobOutputDataConfig | undefined; /** - *

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

+ *

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

* @public */ vpcConfig?: VpcConfig | undefined; @@ -4450,6 +4500,50 @@ export interface ListModelInvocationJobsRequest { /** *

Specify a status to filter for batch inference jobs whose statuses match the string you specify.

+ *

The following statuses are possible:

+ *
    + *
  • + *

    Submitted – This job has been submitted to a queue for validation.

    + *
  • + *
  • + *

    Validating – This job is being validated for the requirements described in Format and upload your batch inference data. The criteria include the following:

    + *
      + *
    • + *

      Your IAM service role has access to the Amazon S3 buckets containing your files.

      + *
    • + *
    • + *

      Your files are .jsonl files and each individual record is a JSON object in the correct format. Note that validation doesn't check if the modelInput value matches the request body for the model.

      + *
    • + *
    • + *

      Your files fulfill the requirements for file size and number of records. For more information, see Quotas for Amazon Bedrock.

      + *
    • + *
    + *
  • + *
  • + *

    Scheduled – This job has been validated and is now in a queue. The job will automatically start when it reaches its turn.

    + *
  • + *
  • + *

    Expired – This job timed out because it was scheduled but didn't begin before the set timeout duration. Submit a new job request.

    + *
  • + *
  • + *

    InProgress – This job has begun. You can start viewing the results in the output S3 location.

    + *
  • + *
  • + *

    Completed – This job has successfully completed. View the output files in the output S3 location.

    + *
  • + *
  • + *

    PartiallyCompleted – This job has partially completed. Not all of your records could be processed in time. View the output files in the output S3 location.

    + *
  • + *
  • + *

    Failed – This job has failed. Check the failure message for any further details. For further assistance, reach out to the Amazon Web Services Support Center.

    + *
  • + *
  • + *

    Stopped – This job was stopped by a user.

    + *
  • + *
  • + *

    Stopping – This job is being stopped by a user.

    + *
  • + *
* @public */ statusEquals?: ModelInvocationJobStatus | undefined; @@ -4527,6 +4621,50 @@ export interface ModelInvocationJobSummary { /** *

The status of the batch inference job.

+ *

The following statuses are possible:

+ *
    + *
  • + *

    Submitted – This job has been submitted to a queue for validation.

    + *
  • + *
  • + *

    Validating – This job is being validated for the requirements described in Format and upload your batch inference data. The criteria include the following:

    + *
      + *
    • + *

      Your IAM service role has access to the Amazon S3 buckets containing your files.

      + *
    • + *
    • + *

      Your files are .jsonl files and each individual record is a JSON object in the correct format. Note that validation doesn't check if the modelInput value matches the request body for the model.

      + *
    • + *
    • + *

      Your files fulfill the requirements for file size and number of records. For more information, see Quotas for Amazon Bedrock.

      + *
    • + *
    + *
  • + *
  • + *

    Scheduled – This job has been validated and is now in a queue. The job will automatically start when it reaches its turn.

    + *
  • + *
  • + *

    Expired – This job timed out because it was scheduled but didn't begin before the set timeout duration. Submit a new job request.

    + *
  • + *
  • + *

    InProgress – This job has begun. You can start viewing the results in the output S3 location.

    + *
  • + *
  • + *

    Completed – This job has successfully completed. View the output files in the output S3 location.

    + *
  • + *
  • + *

    PartiallyCompleted – This job has partially completed. Not all of your records could be processed in time. View the output files in the output S3 location.

    + *
  • + *
  • + *

    Failed – This job has failed. Check the failure message for any further details. For further assistance, reach out to the Amazon Web Services Support Center.

    + *
  • + *
  • + *

    Stopped – This job was stopped by a user.

    + *
  • + *
  • + *

    Stopping – This job is being stopped by a user.

    + *
  • + *
* @public */ status?: ModelInvocationJobStatus | undefined; @@ -4568,7 +4706,7 @@ export interface ModelInvocationJobSummary { outputDataConfig: ModelInvocationJobOutputDataConfig | undefined; /** - *

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

+ *

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

* @public */ vpcConfig?: VpcConfig | undefined; @@ -4646,12 +4784,81 @@ export interface GetCustomModelRequest { modelIdentifier: string | undefined; } +/** + *

Details about a teacher model used for model customization.

+ * @public + */ +export interface TeacherModelConfig { + /** + *

The identifier of the teacher model.

+ * @public + */ + teacherModelIdentifier: string | undefined; + + /** + *

The maximum number of tokens requested when the customization job invokes the teacher model.

+ * @public + */ + maxResponseLengthForInference?: number | undefined; +} + +/** + *

Settings for distilling a foundation model into a smaller and more efficient model.

+ * @public + */ +export interface DistillationConfig { + /** + *

The teacher model configuration.

+ * @public + */ + teacherModelConfig: TeacherModelConfig | undefined; +} + +/** + *

A model customization configuration.

+ * @public + */ +export type CustomizationConfig = CustomizationConfig.DistillationConfigMember | CustomizationConfig.$UnknownMember; + +/** + * @public + */ +export namespace CustomizationConfig { + /** + *

The distillation configuration for the custom model.

+ * @public + */ + export interface DistillationConfigMember { + distillationConfig: DistillationConfig; + $unknown?: never; + } + + /** + * @public + */ + export interface $UnknownMember { + distillationConfig?: never; + $unknown: [string, any]; + } + + export interface Visitor { + distillationConfig: (value: DistillationConfig) => T; + _: (name: string, value: any) => T; + } + + export const visit = (value: CustomizationConfig, visitor: Visitor): T => { + if (value.distillationConfig !== undefined) return visitor.distillationConfig(value.distillationConfig); + return visitor._(value.$unknown[0], value.$unknown[1]); + }; +} + /** * @public * @enum */ export const CustomizationType = { CONTINUED_PRE_TRAINING: "CONTINUED_PRE_TRAINING", + DISTILLATION: "DISTILLATION", FINE_TUNING: "FINE_TUNING", } as const; @@ -4672,6 +4879,179 @@ export interface OutputDataConfig { s3Uri: string | undefined; } +/** + *

A storage location for invocation logs.

+ * @public + */ +export type InvocationLogSource = InvocationLogSource.S3UriMember | InvocationLogSource.$UnknownMember; + +/** + * @public + */ +export namespace InvocationLogSource { + /** + *

The URI of an invocation log in a bucket.

+ * @public + */ + export interface S3UriMember { + s3Uri: string; + $unknown?: never; + } + + /** + * @public + */ + export interface $UnknownMember { + s3Uri?: never; + $unknown: [string, any]; + } + + export interface Visitor { + s3Uri: (value: string) => T; + _: (name: string, value: any) => T; + } + + export const visit = (value: InvocationLogSource, visitor: Visitor): T => { + if (value.s3Uri !== undefined) return visitor.s3Uri(value.s3Uri); + return visitor._(value.$unknown[0], value.$unknown[1]); + }; +} + +/** + *

A mapping of a metadata key to a value that it should or should not equal.

+ * @public + */ +export interface RequestMetadataBaseFilters { + /** + *

Include results where the key equals the value.

+ * @public + */ + equals?: Record | undefined; + + /** + *

Include results where the key does not equal the value.

+ * @public + */ + notEquals?: Record | undefined; +} + +/** + *

Rules for filtering invocation logs. A filter can be a mapping of a metadata + * key to a value that it should or should not equal (a base filter), or a list of base filters + * that are all applied with AND or OR logical operators.

+ * @public + */ +export type RequestMetadataFilters = + | RequestMetadataFilters.AndAllMember + | RequestMetadataFilters.EqualsMember + | RequestMetadataFilters.NotEqualsMember + | RequestMetadataFilters.OrAllMember + | RequestMetadataFilters.$UnknownMember; + +/** + * @public + */ +export namespace RequestMetadataFilters { + /** + *

Include results where the key equals the value.

+ * @public + */ + export interface EqualsMember { + equals: Record; + notEquals?: never; + andAll?: never; + orAll?: never; + $unknown?: never; + } + + /** + *

Include results where the key does not equal the value.

+ * @public + */ + export interface NotEqualsMember { + equals?: never; + notEquals: Record; + andAll?: never; + orAll?: never; + $unknown?: never; + } + + /** + *

Include results where all of the base filters match.

+ * @public + */ + export interface AndAllMember { + equals?: never; + notEquals?: never; + andAll: RequestMetadataBaseFilters[]; + orAll?: never; + $unknown?: never; + } + + /** + *

Include results where any of the base filters match.

+ * @public + */ + export interface OrAllMember { + equals?: never; + notEquals?: never; + andAll?: never; + orAll: RequestMetadataBaseFilters[]; + $unknown?: never; + } + + /** + * @public + */ + export interface $UnknownMember { + equals?: never; + notEquals?: never; + andAll?: never; + orAll?: never; + $unknown: [string, any]; + } + + export interface Visitor { + equals: (value: Record) => T; + notEquals: (value: Record) => T; + andAll: (value: RequestMetadataBaseFilters[]) => T; + orAll: (value: RequestMetadataBaseFilters[]) => T; + _: (name: string, value: any) => T; + } + + export const visit = (value: RequestMetadataFilters, visitor: Visitor): T => { + if (value.equals !== undefined) return visitor.equals(value.equals); + if (value.notEquals !== undefined) return visitor.notEquals(value.notEquals); + if (value.andAll !== undefined) return visitor.andAll(value.andAll); + if (value.orAll !== undefined) return visitor.orAll(value.orAll); + return visitor._(value.$unknown[0], value.$unknown[1]); + }; +} + +/** + *

Settings for using invocation logs to customize a model.

+ * @public + */ +export interface InvocationLogsConfig { + /** + *

Whether to use the model's response for training, or just the prompt. The default value is False.

+ * @public + */ + usePromptResponse?: boolean | undefined; + + /** + *

The source of the invocation logs.

+ * @public + */ + invocationLogSource: InvocationLogSource | undefined; + + /** + *

Rules for filtering invocation logs based on request metadata.

+ * @public + */ + requestMetadataFilters?: RequestMetadataFilters | undefined; +} + /** *

S3 Location of the training data.

* @public @@ -4681,7 +5061,13 @@ export interface TrainingDataConfig { *

The S3 URI where the training data is stored.

* @public */ - s3Uri: string | undefined; + s3Uri?: string | undefined; + + /** + *

Settings for using invocation logs to customize a model.

+ * @public + */ + invocationLogsConfig?: InvocationLogsConfig | undefined; } /** @@ -4819,6 +5205,12 @@ export interface GetCustomModelResponse { * @public */ creationTime: Date | undefined; + + /** + *

The customization configuration for the custom model.

+ * @public + */ + customizationConfig?: CustomizationConfig | undefined; } /** @@ -4838,6 +5230,7 @@ export interface GetFoundationModelRequest { */ export const ModelCustomization = { CONTINUED_PRE_TRAINING: "CONTINUED_PRE_TRAINING", + DISTILLATION: "DISTILLATION", FINE_TUNING: "FINE_TUNING", } as const; @@ -5776,13 +6169,19 @@ export interface CreateModelCustomizationJobRequest { *

Parameters related to tuning the model. For details on the format for different models, see Custom model hyperparameters.

* @public */ - hyperParameters: Record | undefined; + hyperParameters?: Record | undefined; /** *

The configuration of the Virtual Private Cloud (VPC) that contains the resources that you're using for this job. For more information, see Protect your model customization jobs using a VPC.

* @public */ vpcConfig?: VpcConfig | undefined; + + /** + *

The customization configuration for the model customization job.

+ * @public + */ + customizationConfig?: CustomizationConfig | undefined; } /** @@ -5906,7 +6305,7 @@ export interface GetModelCustomizationJobResponse { *

The hyperparameter values for the job. For details on the format for different models, see Custom model hyperparameters.

* @public */ - hyperParameters: Record | undefined; + hyperParameters?: Record | undefined; /** *

Contains information about the training dataset.

@@ -5955,6 +6354,12 @@ export interface GetModelCustomizationJobResponse { * @public */ vpcConfig?: VpcConfig | undefined; + + /** + *

The customization configuration for the model customization job.

+ * @public + */ + customizationConfig?: CustomizationConfig | undefined; } /** @@ -7338,6 +7743,76 @@ export const ListModelInvocationJobsResponseFilterSensitiveLog = (obj: ListModel }), }); +/** + * @internal + */ +export const RequestMetadataBaseFiltersFilterSensitiveLog = (obj: RequestMetadataBaseFilters): any => ({ + ...obj, + ...(obj.equals && { equals: SENSITIVE_STRING }), + ...(obj.notEquals && { notEquals: SENSITIVE_STRING }), +}); + +/** + * @internal + */ +export const RequestMetadataFiltersFilterSensitiveLog = (obj: RequestMetadataFilters): any => { + if (obj.equals !== undefined) return { equals: SENSITIVE_STRING }; + if (obj.notEquals !== undefined) return { notEquals: SENSITIVE_STRING }; + if (obj.andAll !== undefined) + return { andAll: obj.andAll.map((item) => RequestMetadataBaseFiltersFilterSensitiveLog(item)) }; + if (obj.orAll !== undefined) + return { orAll: obj.orAll.map((item) => RequestMetadataBaseFiltersFilterSensitiveLog(item)) }; + if (obj.$unknown !== undefined) return { [obj.$unknown[0]]: "UNKNOWN" }; +}; + +/** + * @internal + */ +export const InvocationLogsConfigFilterSensitiveLog = (obj: InvocationLogsConfig): any => ({ + ...obj, + ...(obj.invocationLogSource && { invocationLogSource: obj.invocationLogSource }), + ...(obj.requestMetadataFilters && { + requestMetadataFilters: RequestMetadataFiltersFilterSensitiveLog(obj.requestMetadataFilters), + }), +}); + +/** + * @internal + */ +export const TrainingDataConfigFilterSensitiveLog = (obj: TrainingDataConfig): any => ({ + ...obj, + ...(obj.invocationLogsConfig && { + invocationLogsConfig: InvocationLogsConfigFilterSensitiveLog(obj.invocationLogsConfig), + }), +}); + +/** + * @internal + */ +export const GetCustomModelResponseFilterSensitiveLog = (obj: GetCustomModelResponse): any => ({ + ...obj, + ...(obj.trainingDataConfig && { trainingDataConfig: TrainingDataConfigFilterSensitiveLog(obj.trainingDataConfig) }), + ...(obj.customizationConfig && { customizationConfig: 
obj.customizationConfig }), +}); + +/** + * @internal + */ +export const CreateModelCustomizationJobRequestFilterSensitiveLog = (obj: CreateModelCustomizationJobRequest): any => ({ + ...obj, + ...(obj.trainingDataConfig && { trainingDataConfig: TrainingDataConfigFilterSensitiveLog(obj.trainingDataConfig) }), + ...(obj.customizationConfig && { customizationConfig: obj.customizationConfig }), +}); + +/** + * @internal + */ +export const GetModelCustomizationJobResponseFilterSensitiveLog = (obj: GetModelCustomizationJobResponse): any => ({ + ...obj, + ...(obj.trainingDataConfig && { trainingDataConfig: TrainingDataConfigFilterSensitiveLog(obj.trainingDataConfig) }), + ...(obj.customizationConfig && { customizationConfig: obj.customizationConfig }), +}); + /** * @internal */ diff --git a/clients/client-bedrock/src/protocols/Aws_restJson1.ts b/clients/client-bedrock/src/protocols/Aws_restJson1.ts index 959ec59e8a3f..669b22d72be8 100644 --- a/clients/client-bedrock/src/protocols/Aws_restJson1.ts +++ b/clients/client-bedrock/src/protocols/Aws_restJson1.ts @@ -175,7 +175,9 @@ import { ByteContentDoc, CloudWatchConfig, ConflictException, + CustomizationConfig, CustomModelSummary, + DistillationConfig, EvaluationBedrockModel, EvaluationConfig, EvaluationDataset, @@ -214,6 +216,8 @@ import { InferenceProfileModelSource, InferenceProfileSummary, InternalServerException, + InvocationLogsConfig, + InvocationLogSource, KbInferenceConfig, KnowledgeBaseConfig, KnowledgeBaseRetrievalConfiguration, @@ -235,6 +239,8 @@ import { ProvisionedModelSummary, QueryTransformationConfiguration, RAGConfig, + RequestMetadataBaseFilters, + RequestMetadataFilters, ResourceNotFoundException, RetrievalFilter, RetrieveAndGenerateConfiguration, @@ -244,6 +250,7 @@ import { S3ObjectDoc, ServiceQuotaExceededException, Tag, + TeacherModelConfig, TextInferenceConfig, ThrottlingException, TooManyTagsException, @@ -438,6 +445,7 @@ export const se_CreateModelCustomizationJobCommand = async ( 
customModelKmsKeyId: [], customModelName: [], customModelTags: (_) => _json(_), + customizationConfig: (_) => _json(_), customizationType: [], hyperParameters: (_) => _json(_), jobName: [], @@ -1627,6 +1635,7 @@ export const de_GetCustomModelCommand = async ( const doc = take(data, { baseModelArn: __expectString, creationTime: (_) => __expectNonNull(__parseRfc3339DateTimeWithOffset(_)), + customizationConfig: (_) => _json(__expectUnion(_)), customizationType: __expectString, hyperParameters: _json, jobArn: __expectString, @@ -1844,6 +1853,7 @@ export const de_GetModelCustomizationJobCommand = async ( baseModelArn: __expectString, clientRequestToken: __expectString, creationTime: (_) => __expectNonNull(__parseRfc3339DateTimeWithOffset(_)), + customizationConfig: (_) => _json(__expectUnion(_)), customizationType: __expectString, endTime: (_) => __expectNonNull(__parseRfc3339DateTimeWithOffset(_)), failureMessage: __expectString, @@ -2630,6 +2640,10 @@ const se_ByteContentDoc = (input: ByteContentDoc, context: __SerdeContext): any // se_CloudWatchConfig omitted. +// se_CustomizationConfig omitted. + +// se_DistillationConfig omitted. + // se_EvaluationBedrockModel omitted. // se_EvaluationConfig omitted. @@ -2830,6 +2844,10 @@ const se_GuardrailContextualGroundingPolicyConfig = ( // se_InferenceProfileModelSource omitted. +// se_InvocationLogsConfig omitted. + +// se_InvocationLogSource omitted. + /** * serializeAws_restJson1KbInferenceConfig */ @@ -2939,6 +2957,14 @@ const se_RagConfigs = (input: RAGConfig[], context: __SerdeContext): any => { // se_RAGStopSequences omitted. +// se_RequestMetadataBaseFilters omitted. + +// se_RequestMetadataFilters omitted. + +// se_RequestMetadataFiltersList omitted. + +// se_RequestMetadataMap omitted. + /** * serializeAws_restJson1RetrievalFilter */ @@ -3009,6 +3035,8 @@ const se_RetrieveConfig = (input: RetrieveConfig, context: __SerdeContext): any // se_TagList omitted. +// se_TeacherModelConfig omitted. 
+ /** * serializeAws_restJson1TextInferenceConfig */ @@ -3078,6 +3106,8 @@ const de_ByteContentDoc = (output: any, context: __SerdeContext): ByteContentDoc // de_CloudWatchConfig omitted. +// de_CustomizationConfig omitted. + /** * deserializeAws_restJson1CustomModelSummary */ @@ -3105,6 +3135,8 @@ const de_CustomModelSummaryList = (output: any, context: __SerdeContext): Custom return retVal; }; +// de_DistillationConfig omitted. + // de_ErrorMessages omitted. // de_EvaluationBedrockModel omitted. @@ -3447,6 +3479,10 @@ const de_InferenceProfileSummary = (output: any, context: __SerdeContext): Infer // de_InferenceTypeList omitted. +// de_InvocationLogsConfig omitted. + +// de_InvocationLogSource omitted. + /** * deserializeAws_restJson1KbInferenceConfig */ @@ -3725,6 +3761,14 @@ const de_RagConfigs = (output: any, context: __SerdeContext): RAGConfig[] => { // de_RAGStopSequences omitted. +// de_RequestMetadataBaseFilters omitted. + +// de_RequestMetadataFilters omitted. + +// de_RequestMetadataFiltersList omitted. + +// de_RequestMetadataMap omitted. + /** * deserializeAws_restJson1RetrievalFilter */ @@ -3847,6 +3891,8 @@ const de_RetrieveConfig = (output: any, context: __SerdeContext): RetrieveConfig // de_TagList omitted. +// de_TeacherModelConfig omitted. + /** * deserializeAws_restJson1TextInferenceConfig */ diff --git a/codegen/sdk-codegen/aws-models/bedrock.json b/codegen/sdk-codegen/aws-models/bedrock.json index cba2fac52ecb..378b7dc3589d 100644 --- a/codegen/sdk-codegen/aws-models/bedrock.json +++ b/codegen/sdk-codegen/aws-models/bedrock.json @@ -1282,7 +1282,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a guardrail to block topics and to implement safeguards for your generative AI applications.

\n

You can configure the following policies in a guardrail to avoid undesirable and harmful content, filter \n out denied topics and words, and remove sensitive information for privacy protection.

\n
    \n
  • \n

    \n Content filters - Adjust filter strengths\n to block input prompts or model responses containing harmful content.

    \n
  • \n
  • \n

    \n Denied topics - Define a set of topics that\n are undesirable in the context of your application. These topics will be blocked if\n detected in user queries or model responses.

    \n
  • \n
  • \n

    \n Word filters - Configure filters to block\n undesirable words, phrases, and profanity. Such words can include offensive terms, \n competitor names etc.

    \n
  • \n
  • \n

    \n Sensitive information filters - Block or\n mask sensitive information such as personally identifiable information (PII) or custom \n regex in user inputs and model responses.

    \n
  • \n
\n

In addition to the above policies, you can also configure the messages to be returned to \n the user if a user input or model response is in violation of the policies defined in the guardrail.

\n

For more information, see Guardrails for Amazon Bedrock in\n the Amazon Bedrock User Guide.

", + "smithy.api#documentation": "

Creates a guardrail to block topics and to implement safeguards for your generative AI applications.

\n

You can configure the following policies in a guardrail to avoid undesirable and harmful content, filter \n out denied topics and words, and remove sensitive information for privacy protection.

\n
    \n
  • \n

    \n Content filters - Adjust filter strengths\n to block input prompts or model responses containing harmful content.

    \n
  • \n
  • \n

    \n Denied topics - Define a set of topics that\n are undesirable in the context of your application. These topics will be blocked if\n detected in user queries or model responses.

    \n
  • \n
  • \n

    \n Word filters - Configure filters to block\n undesirable words, phrases, and profanity. Such words can include offensive terms, \n competitor names etc.

    \n
  • \n
  • \n

    \n Sensitive information filters - Block or\n mask sensitive information such as personally identifiable information (PII) or custom \n regex in user inputs and model responses.

    \n
  • \n
\n

In addition to the above policies, you can also configure the messages to be returned to \n the user if a user input or model response is in violation of the policies defined in the guardrail.

\n

For more information, see Amazon Bedrock Guardrails in\n the Amazon Bedrock User Guide.

", "smithy.api#http": { "code": 202, "method": "POST", @@ -1824,8 +1824,7 @@ "hyperParameters": { "target": "com.amazonaws.bedrock#ModelCustomizationHyperParameters", "traits": { - "smithy.api#documentation": "

Parameters related to tuning the model. For details on the format for different models, see Custom model hyperparameters.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Parameters related to tuning the model. For details on the format for different models, see Custom model hyperparameters.

" } }, "vpcConfig": { @@ -1833,6 +1832,12 @@ "traits": { "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) that contains the resources that you're using for this job. For more information, see Protect your model customization jobs using a VPC.

" } + }, + "customizationConfig": { + "target": "com.amazonaws.bedrock#CustomizationConfig", + "traits": { + "smithy.api#documentation": "

The customization configuration for the model customization job.

" + } } }, "traits": { @@ -2068,7 +2073,7 @@ "vpcConfig": { "target": "com.amazonaws.bedrock#VpcConfig", "traits": { - "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" + "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" } }, "timeoutDurationInHours": { @@ -2288,6 +2293,20 @@ "target": "com.amazonaws.bedrock#CustomModelSummary" } }, + "com.amazonaws.bedrock#CustomizationConfig": { + "type": "union", + "members": { + "distillationConfig": { + "target": "com.amazonaws.bedrock#DistillationConfig", + "traits": { + "smithy.api#documentation": "

The distillation configuration for the custom model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A model customization configuration.

" + } + }, "com.amazonaws.bedrock#CustomizationType": { "type": "enum", "members": { @@ -2302,6 +2321,12 @@ "traits": { "smithy.api#enumValue": "CONTINUED_PRE_TRAINING" } + }, + "DISTILLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISTILLATION" + } } } }, @@ -2660,6 +2685,21 @@ "smithy.api#output": {} } }, + "com.amazonaws.bedrock#DistillationConfig": { + "type": "structure", + "members": { + "teacherModelConfig": { + "target": "com.amazonaws.bedrock#TeacherModelConfig", + "traits": { + "smithy.api#documentation": "

The teacher model configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for distilling a foundation model into a smaller and more efficient model.

" + } + }, "com.amazonaws.bedrock#ErrorMessage": { "type": "string", "traits": { @@ -3799,6 +3839,12 @@ "smithy.api#documentation": "

Creation time of the model.

", "smithy.api#required": {} } + }, + "customizationConfig": { + "target": "com.amazonaws.bedrock#CustomizationConfig", + "traits": { + "smithy.api#documentation": "

The customization configuration for the custom model.

" + } } }, "traits": { @@ -4703,8 +4749,7 @@ "hyperParameters": { "target": "com.amazonaws.bedrock#ModelCustomizationHyperParameters", "traits": { - "smithy.api#documentation": "

The hyperparameter values for the job. For details on the format for different models, see Custom model hyperparameters.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The hyperparameter values for the job. For details on the format for different models, see Custom model hyperparameters.

" } }, "trainingDataConfig": { @@ -4757,6 +4802,12 @@ "traits": { "smithy.api#documentation": "

VPC configuration for the custom model job.

" } + }, + "customizationConfig": { + "target": "com.amazonaws.bedrock#CustomizationConfig", + "traits": { + "smithy.api#documentation": "

The customization configuration for the model customization job.

" + } } }, "traits": { @@ -4926,7 +4977,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets details about a batch inference job. For more information, see View details about a batch inference job\n

", + "smithy.api#documentation": "

Gets details about a batch inference job. For more information, see Monitor batch inference jobs\n

", "smithy.api#http": { "code": 200, "method": "GET", @@ -4990,7 +5041,7 @@ "status": { "target": "com.amazonaws.bedrock#ModelInvocationJobStatus", "traits": { - "smithy.api#documentation": "

The status of the batch inference job.

" + "smithy.api#documentation": "

The status of the batch inference job.

\n

The following statuses are possible:

\n
    \n
  • \n

    Submitted – This job has been submitted to a queue for validation.

    \n
  • \n
  • \n

    Validating – This job is being validated for the requirements described in Format and upload your batch inference data. The criteria include the following:

    \n
      \n
    • \n

      Your IAM service role has access to the Amazon S3 buckets containing your files.

      \n
    • \n
    • \n

      Your files are .jsonl files and each individual record is a JSON object in the correct format. Note that validation doesn't check if the modelInput value matches the request body for the model.

      \n
    • \n
    • \n

      Your files fulfill the requirements for file size and number of records. For more information, see Quotas for Amazon Bedrock.

      \n
    • \n
    \n
  • \n
  • \n

    Scheduled – This job has been validated and is now in a queue. The job will automatically start when it reaches its turn.

    \n
  • \n
  • \n

    Expired – This job timed out because it was scheduled but didn't begin before the set timeout duration. Submit a new job request.

    \n
  • \n
  • \n

    InProgress – This job has begun. You can start viewing the results in the output S3 location.

    \n
  • \n
  • \n

    Completed – This job has successfully completed. View the output files in the output S3 location.

    \n
  • \n
  • \n

    PartiallyCompleted – This job has partially completed. Not all of your records could be processed in time. View the output files in the output S3 location.

    \n
  • \n
  • \n

    Failed – This job has failed. Check the failure message for any further details. For further assistance, reach out to the Amazon Web Services Support Center.

    \n
  • \n
  • \n

    Stopped – This job was stopped by a user.

    \n
  • \n
  • \n

    Stopping – This job is being stopped by a user.

    \n
  • \n
" } }, "message": { @@ -5035,7 +5086,7 @@ "vpcConfig": { "target": "com.amazonaws.bedrock#VpcConfig", "traits": { - "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" + "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" } }, "timeoutDurationInHours": { @@ -7076,6 +7127,48 @@ "smithy.api#httpError": 500 } }, + "com.amazonaws.bedrock#InvocationLogSource": { + "type": "union", + "members": { + "s3Uri": { + "target": "com.amazonaws.bedrock#S3Uri", + "traits": { + "smithy.api#documentation": "

The URI of an invocation log in a bucket.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A storage location for invocation logs.

" + } + }, + "com.amazonaws.bedrock#InvocationLogsConfig": { + "type": "structure", + "members": { + "usePromptResponse": { + "target": "com.amazonaws.bedrock#UsePromptResponse", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Whether to use the model's response for training, or just the prompt. The default value is False.

" + } + }, + "invocationLogSource": { + "target": "com.amazonaws.bedrock#InvocationLogSource", + "traits": { + "smithy.api#documentation": "

The source of the invocation logs.

", + "smithy.api#required": {} + } + }, + "requestMetadataFilters": { + "target": "com.amazonaws.bedrock#RequestMetadataFilters", + "traits": { + "smithy.api#documentation": "

Rules for filtering invocation logs based on request metadata.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for using invocation logs to customize a model.

" + } + }, "com.amazonaws.bedrock#JobName": { "type": "string", "traits": { @@ -8288,7 +8381,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all batch inference jobs in the account. For more information, see View details about a batch inference job.

", + "smithy.api#documentation": "

Lists all batch inference jobs in the account. For more information, see View details about a batch inference job.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -8323,7 +8416,7 @@ "statusEquals": { "target": "com.amazonaws.bedrock#ModelInvocationJobStatus", "traits": { - "smithy.api#documentation": "

Specify a status to filter for batch inference jobs whose statuses match the string you specify.

", + "smithy.api#documentation": "

Specify a status to filter for batch inference jobs whose statuses match the string you specify.

\n

The following statuses are possible:

\n
    \n
  • \n

    Submitted – This job has been submitted to a queue for validation.

    \n
  • \n
  • \n

    Validating – This job is being validated for the requirements described in Format and upload your batch inference data. The criteria include the following:

    \n
      \n
    • \n

      Your IAM service role has access to the Amazon S3 buckets containing your files.

      \n
    • \n
    • \n

      Your files are .jsonl files and each individual record is a JSON object in the correct format. Note that validation doesn't check if the modelInput value matches the request body for the model.

      \n
    • \n
    • \n

      Your files fulfill the requirements for file size and number of records. For more information, see Quotas for Amazon Bedrock.

      \n
    • \n
    \n
  • \n
  • \n

    Scheduled – This job has been validated and is now in a queue. The job will automatically start when it reaches its turn.

    \n
  • \n
  • \n

    Expired – This job timed out because it was scheduled but didn't begin before the set timeout duration. Submit a new job request.

    \n
  • \n
  • \n

    InProgress – This job has begun. You can start viewing the results in the output S3 location.

    \n
  • \n
  • \n

    Completed – This job has successfully completed. View the output files in the output S3 location.

    \n
  • \n
  • \n

    PartiallyCompleted – This job has partially completed. Not all of your records could be processed in time. View the output files in the output S3 location.

    \n
  • \n
  • \n

    Failed – This job has failed. Check the failure message for any further details. For further assistance, reach out to the Amazon Web Services Support Center.

    \n
  • \n
  • \n

    Stopped – This job was stopped by a user.

    \n
  • \n
  • \n

    Stopping – This job is being stopped by a user.

    \n
  • \n
", "smithy.api#httpQuery": "statusEquals" } }, @@ -8620,6 +8713,12 @@ "traits": { "smithy.api#documentation": "

Set to include embeddings data in the log delivery.

" } + }, + "videoDataDeliveryEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Set to include video data in the log delivery.

" + } } }, "traits": { @@ -8829,6 +8928,12 @@ "traits": { "smithy.api#enumValue": "CONTINUED_PRE_TRAINING" } + }, + "DISTILLATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISTILLATION" + } } } }, @@ -9391,7 +9496,7 @@ "status": { "target": "com.amazonaws.bedrock#ModelInvocationJobStatus", "traits": { - "smithy.api#documentation": "

The status of the batch inference job.

" + "smithy.api#documentation": "

The status of the batch inference job.

\n

The following statuses are possible:

\n
    \n
  • \n

    Submitted – This job has been submitted to a queue for validation.

    \n
  • \n
  • \n

    Validating – This job is being validated for the requirements described in Format and upload your batch inference data. The criteria include the following:

    \n
      \n
    • \n

      Your IAM service role has access to the Amazon S3 buckets containing your files.

      \n
    • \n
    • \n

      Your files are .jsonl files and each individual record is a JSON object in the correct format. Note that validation doesn't check if the modelInput value matches the request body for the model.

      \n
    • \n
    • \n

      Your files fulfill the requirements for file size and number of records. For more information, see Quotas for Amazon Bedrock.

      \n
    • \n
    \n
  • \n
  • \n

    Scheduled – This job has been validated and is now in a queue. The job will automatically start when it reaches its turn.

    \n
  • \n
  • \n

    Expired – This job timed out because it was scheduled but didn't begin before the set timeout duration. Submit a new job request.

    \n
  • \n
  • \n

    InProgress – This job has begun. You can start viewing the results in the output S3 location.

    \n
  • \n
  • \n

    Completed – This job has successfully completed. View the output files in the output S3 location.

    \n
  • \n
  • \n

    PartiallyCompleted – This job has partially completed. Not all of your records could be processed in time. View the output files in the output S3 location.

    \n
  • \n
  • \n

    Failed – This job has failed. Check the failure message for any further details. For further assistance, reach out to the Amazon Web Services Support Center.

    \n
  • \n
  • \n

    Stopped – This job was stopped by a user.

    \n
  • \n
  • \n

    Stopping – This job is being stopped by a user.

    \n
  • \n
" } }, "message": { @@ -9436,7 +9541,7 @@ "vpcConfig": { "target": "com.amazonaws.bedrock#VpcConfig", "traits": { - "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" + "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" } }, "timeoutDurationInHours": { @@ -9901,6 +10006,100 @@ } } }, + "com.amazonaws.bedrock#RequestMetadataBaseFilters": { + "type": "structure", + "members": { + "equals": { + "target": "com.amazonaws.bedrock#RequestMetadataMap", + "traits": { + "smithy.api#documentation": "

Include results where the key equals the value.

" + } + }, + "notEquals": { + "target": "com.amazonaws.bedrock#RequestMetadataMap", + "traits": { + "smithy.api#documentation": "

Include results where the key does not equal the value.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A mapping of a metadata key to a value that it should or should not equal.

" + } + }, + "com.amazonaws.bedrock#RequestMetadataFilters": { + "type": "union", + "members": { + "equals": { + "target": "com.amazonaws.bedrock#RequestMetadataMap", + "traits": { + "smithy.api#documentation": "

Include results where the key equals the value.

" + } + }, + "notEquals": { + "target": "com.amazonaws.bedrock#RequestMetadataMap", + "traits": { + "smithy.api#documentation": "

Include results where the key does not equal the value.

" + } + }, + "andAll": { + "target": "com.amazonaws.bedrock#RequestMetadataFiltersList", + "traits": { + "smithy.api#documentation": "

Include results where all of the base filters match.

" + } + }, + "orAll": { + "target": "com.amazonaws.bedrock#RequestMetadataFiltersList", + "traits": { + "smithy.api#documentation": "

Include results where any of the base filters match.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Rules for filtering invocation logs. A filter can be a mapping of a metadata\n key to a value that it should or should not equal (a base filter), or a list of base filters\n that are all applied with AND or OR logical operators.

" + } + }, + "com.amazonaws.bedrock#RequestMetadataFiltersList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrock#RequestMetadataBaseFilters" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 16 + } + } + }, + "com.amazonaws.bedrock#RequestMetadataMap": { + "type": "map", + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\s._:/=+$@-]{1,256}$" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\s._:/=+$@-]{0,256}$" + } + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.bedrock#ResourceNotFoundException": { "type": "structure", "members": { @@ -10420,7 +10619,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a batch inference job. You're only charged for tokens that were already processed. For more information, see Stop a batch inference job.

", + "smithy.api#documentation": "

Stops a batch inference job. You're only charged for tokens that were already processed. For more information, see Stop a batch inference job.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -10614,7 +10813,7 @@ "min": 20, "max": 1011 }, - "smithy.api#pattern": "(^[a-zA-Z0-9][a-zA-Z0-9\\-]*$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:([0-9]{12}|)((:(fine-tuning-job|model-customization-job|custom-model)/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12})$)|(:guardrail/[a-z0-9]+$)|(:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+$)|(:(provisioned-model|model-invocation-job|model-evaluation-job|evaluation-job|model-import-job|imported-model)/[a-z0-9]{12}$)))" + "smithy.api#pattern": "(^[a-zA-Z0-9][a-zA-Z0-9\\-]*$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:([0-9]{12}|)((:(fine-tuning-job|model-customization-job|custom-model)/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12})$)|(:guardrail/[a-z0-9]+$)|(:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+$)|(:(provisioned-model|model-invocation-job|model-evaluation-job|evaluation-job|model-import-job|imported-model|async-invoke)/[a-z0-9]{12}$)))" } }, "com.amazonaws.bedrock#TaggingResource": { @@ -10631,6 +10830,33 @@ } ] }, + "com.amazonaws.bedrock#TeacherModelConfig": { + "type": "structure", + "members": { + "teacherModelIdentifier": { + "target": "com.amazonaws.bedrock#TeacherModelIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the teacher model.

", + "smithy.api#required": {} + } + }, + "maxResponseLengthForInference": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens requested when the customization job invokes the teacher model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about a teacher model used for model customization.

" + } + }, + "com.amazonaws.bedrock#TeacherModelIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})|(([0-9a-zA-Z][_-]?)+)$" + } + }, "com.amazonaws.bedrock#Temperature": { "type": "float", "traits": { @@ -10735,8 +10961,13 @@ "s3Uri": { "target": "com.amazonaws.bedrock#S3Uri", "traits": { - "smithy.api#documentation": "

The S3 URI where the training data is stored.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The S3 URI where the training data is stored.

" + } + }, + "invocationLogsConfig": { + "target": "com.amazonaws.bedrock#InvocationLogsConfig", + "traits": { + "smithy.api#documentation": "

Settings for using invocation logs to customize a model.

" } } }, @@ -11064,6 +11295,12 @@ "smithy.api#output": {} } }, + "com.amazonaws.bedrock#UsePromptResponse": { + "type": "boolean", + "traits": { + "smithy.api#default": false + } + }, "com.amazonaws.bedrock#ValidationDataConfig": { "type": "structure", "members": {