diff --git a/.changes/1.35.49.json b/.changes/1.35.49.json new file mode 100644 index 0000000000..e57b9a2eb7 --- /dev/null +++ b/.changes/1.35.49.json @@ -0,0 +1,27 @@ +[ + { + "category": "``bedrock-agent``", + "description": "Add support of new model types for Bedrock Agents, Adding inference profile support for Flows and Prompt Management, Adding new field to configure additional inference configurations for Flows and Prompt Management", + "type": "api-change" + }, + { + "category": "``codebuild``", + "description": "AWS CodeBuild now supports automatically retrying failed builds", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Add TagsError field in Lambda GetFunctionResponse. The TagsError field contains details related to errors retrieving tags.", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Adding inferred token name for dynamic tokens in Anomalies.", + "type": "api-change" + }, + { + "category": "``supplychain``", + "description": "API doc updates, and also support showing error message on a failed instance", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 8747bcc367..88f1a344ee 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,16 @@ CHANGELOG ========= +1.35.49 +======= + +* api-change:``bedrock-agent``: Add support of new model types for Bedrock Agents, Adding inference profile support for Flows and Prompt Management, Adding new field to configure additional inference configurations for Flows and Prompt Management +* api-change:``codebuild``: AWS CodeBuild now supports automatically retrying failed builds +* api-change:``lambda``: Add TagsError field in Lambda GetFunctionResponse. The TagsError field contains details related to errors retrieving tags. +* api-change:``logs``: Adding inferred token name for dynamic tokens in Anomalies. +* api-change:``supplychain``: API doc updates, and also support showing error message on a failed instance + + 1.35.48 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 7e5a7ba7e3..dc07ab329d 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.48' +__version__ = '1.35.49' class NullHandler(logging.Handler): diff --git a/botocore/data/bedrock-agent/2023-06-05/service-2.json b/botocore/data/bedrock-agent/2023-06-05/service-2.json index 081f0f2300..89a8d37bb5 100644 --- a/botocore/data/bedrock-agent/2023-06-05/service-2.json +++ b/botocore/data/bedrock-agent/2023-06-05/service-2.json @@ -2333,7 +2333,7 @@ }, "foundationModel":{ "shape":"ModelIdentifier", - "documentation":"

The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create.

" + "documentation":"

The identifier of the model that you want the agent you create to use for orchestration.

The modelId to provide depends on the type of model or throughput that you use:

" }, "guardrailConfiguration":{ "shape":"GuardrailConfiguration", @@ -3514,6 +3514,12 @@ "members":{ } }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, "DraftVersion":{ "type":"string", "max":5, @@ -3699,8 +3705,7 @@ "documentation":"

A name for the condition that you can reference.

" } }, - "documentation":"

Defines a condition in the condition node.

", - "sensitive":true + "documentation":"

Defines a condition in the condition node.

" }, "FlowConditionExpression":{ "type":"string", @@ -3727,8 +3732,7 @@ "type":"list", "member":{"shape":"FlowCondition"}, "max":5, - "min":1, - "sensitive":true + "min":1 }, "FlowConnection":{ "type":"structure", @@ -3824,7 +3828,8 @@ "documentation":"

An array of node definitions in the flow.

" } }, - "documentation":"

The definition of the nodes and connections between nodes in the flow.

" + "documentation":"

The definition of the nodes and connections between nodes in the flow.

", + "sensitive":true }, "FlowDescription":{ "type":"string", @@ -4036,8 +4041,7 @@ "type":"list", "member":{"shape":"FlowNode"}, "max":20, - "min":0, - "sensitive":true + "min":0 }, "FlowStatus":{ "type":"string", @@ -5226,9 +5230,10 @@ }, "KnowledgeBaseModelIdentifier":{ "type":"string", + "documentation":"

The ARN or ID of a Bedrock foundation model or inference profile, the ARN of a SageMaker JumpStart model or imported model, or a provisioned throughput ARN for custom models.

", "max":2048, "min":1, - "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" }, "KnowledgeBaseRoleArn":{ "type":"string", @@ -5847,9 +5852,10 @@ }, "ModelIdentifier":{ "type":"string", + "documentation":"

The ARN or ID of a Bedrock foundation model or inference profile, the ARN of a SageMaker JumpStart model or imported model, or a provisioned throughput ARN for custom models.

", "max":2048, "min":1, - "pattern":"^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$" + "pattern":"^(arn:aws(-[^:]{1,12})?:(bedrock|sagemaker):[a-z0-9-]{1,20}:([0-9]{12})?:([a-z-]+/)?)?([a-zA-Z0-9.-]{1,63}){0,2}(([:][a-z0-9-]{1,63}){0,2})?(/[a-z0-9]{1,12})?$" }, "MongoDbAtlasCollectionName":{ "type":"string", @@ -6323,6 +6329,10 @@ "templateType" ], "members":{ + "additionalModelRequestFields":{ + "shape":"Document", + "documentation":"

Contains model-specific inference configurations that aren't in the inferenceConfiguration field. To see model-specific inference parameters, see Inference request parameters and response fields for foundation models.

" + }, "inferenceConfiguration":{ "shape":"PromptInferenceConfiguration", "documentation":"

Contains inference configurations for the prompt.

" @@ -6450,9 +6460,10 @@ }, "PromptModelIdentifier":{ "type":"string", + "documentation":"

The ARN or ID of a Bedrock foundation model or inference profile, the ARN of a SageMaker JumpStart model or imported model, or a provisioned throughput ARN for custom models.

", "max":2048, "min":1, - "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" }, "PromptModelInferenceConfiguration":{ "type":"structure", @@ -6583,6 +6594,10 @@ "templateType" ], "members":{ + "additionalModelRequestFields":{ + "shape":"Document", + "documentation":"

Contains model-specific inference configurations that aren't in the inferenceConfiguration field. To see model-specific inference parameters, see Inference request parameters and response fields for foundation models.

" + }, "inferenceConfiguration":{ "shape":"PromptInferenceConfiguration", "documentation":"

Contains inference configurations for the prompt variant.

" @@ -7676,7 +7691,7 @@ }, "foundationModel":{ "shape":"ModelIdentifier", - "documentation":"

Specifies a new foundation model to be used for orchestration by the agent.

" + "documentation":"

The identifier of the model that you want the agent to use for orchestration.

The modelId to provide depends on the type of model or throughput that you use:

" }, "guardrailConfiguration":{ "shape":"GuardrailConfiguration", diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index 6c4e668698..c4090a825c 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -739,6 +739,28 @@ "SECRETS_MANAGER" ] }, + "AutoRetryConfig":{ + "type":"structure", + "members":{ + "autoRetryLimit":{ + "shape":"WrapperInt", + "documentation":"

The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build up to 2 additional times.

" + }, + "autoRetryNumber":{ + "shape":"WrapperInt", + "documentation":"

The number of times that the build has been retried. The initial build will have an auto-retry number of 0.

" + }, + "nextAutoRetry":{ + "shape":"String", + "documentation":"

The build ARN of the auto-retried build triggered by the current build. The next auto-retry will be null for builds that don't trigger an auto-retry.

" + }, + "previousAutoRetry":{ + "shape":"String", + "documentation":"

The build ARN of the build that triggered the current auto-retry build. The previous auto-retry will be null for the initial build.

" + } + }, + "documentation":"

Information about the auto-retry configuration for the build.

" + }, "BatchDeleteBuildsInput":{ "type":"structure", "required":["ids"], @@ -1061,6 +1083,10 @@ "buildBatchArn":{ "shape":"String", "documentation":"

The ARN of the batch build that this build is a member of, if applicable.

" + }, + "autoRetryConfig":{ + "shape":"AutoRetryConfig", + "documentation":"

Information about the auto-retry configuration for the build.

" } }, "documentation":"

Information about a build.

" @@ -1727,6 +1753,10 @@ "concurrentBuildLimit":{ "shape":"WrapperInt", "documentation":"

The maximum number of concurrent builds that are allowed for this project.

New builds are started only if the current number of builds is below this limit. If the current build count meets this limit, new builds are throttled and are not run.

" + }, + "autoRetryLimit":{ + "shape":"WrapperInt", + "documentation":"

The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build up to 2 additional times.

" } } }, @@ -3199,6 +3229,10 @@ "resourceAccessRole":{ "shape":"NonEmptyString", "documentation":"

The ARN of the IAM role that enables CodeBuild to access the CloudWatch Logs and Amazon S3 artifacts for the project's builds.

" + }, + "autoRetryLimit":{ + "shape":"WrapperInt", + "documentation":"

The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build up to 2 additional times.

" } }, "documentation":"

Information about a build project.

" @@ -4402,6 +4436,10 @@ "fleetOverride":{ "shape":"ProjectFleet", "documentation":"

A ProjectFleet object specified for this build that overrides the one defined in the build project.

" + }, + "autoRetryLimitOverride":{ + "shape":"WrapperInt", + "documentation":"

The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build up to 2 additional times.

" } } }, @@ -4729,6 +4767,10 @@ "concurrentBuildLimit":{ "shape":"WrapperInt", "documentation":"

The maximum number of concurrent builds that are allowed for this project.

New builds are started only if the current number of builds is below this limit. If the current build count meets this limit, new builds are throttled and are not run.

To remove this limit, set this value to -1.

" + }, + "autoRetryLimit":{ + "shape":"WrapperInt", + "documentation":"

The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build up to 2 additional times.

" } } }, diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 3442dc5920..622af11a63 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -13262,6 +13262,7 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-4" : { }, @@ -13271,6 +13272,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -28113,6 +28115,36 @@ } } }, + "redshift-serverless" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "rekognition" : { "endpoints" : { "rekognition-fips.us-gov-west-1" : { diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index 49d0b40b55..bb88177b62 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -3229,7 +3229,11 @@ }, "Tags":{ "shape":"Tags", - "documentation":"

The function's tags.

" + "documentation":"

The function's tags. Lambda returns tag data only if you have explicit allow permissions for lambda:ListTags.

" + }, + "TagsError":{ + "shape":"TagsError", + "documentation":"

An object that contains details about an error related to retrieving tags.

" }, "Concurrency":{ "shape":"Concurrency", @@ -5766,6 +5770,36 @@ "key":{"shape":"TagKey"}, "value":{"shape":"TagValue"} }, + "TagsError":{ + "type":"structure", + "required":[ + "ErrorCode", + "Message" + ], + "members":{ + "ErrorCode":{ + "shape":"TagsErrorCode", + "documentation":"

The error code.

" + }, + "Message":{ + "shape":"TagsErrorMessage", + "documentation":"

The error message.

" + } + }, + "documentation":"

An object that contains details about an error related to retrieving tags.

" + }, + "TagsErrorCode":{ + "type":"string", + "max":21, + "min":10, + "pattern":"[A-Za-z]+Exception" + }, + "TagsErrorMessage":{ + "type":"string", + "max":1000, + "min":84, + "pattern":"^.*$" + }, "ThrottleReason":{ "type":"string", "enum":[ diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index f92baf9c9a..4c0aebecd0 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -61,7 +61,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created.

Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

You can't update an existing delivery. You can only create and delete deliveries.

" + "documentation":"

Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created.

Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

To update an existing delivery configuration, use UpdateDeliveryConfiguration.

" }, "CreateExportTask":{ "name":"CreateExportTask", @@ -479,7 +479,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.

CloudWatch Logs doesn’t support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

" + "documentation":"

Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.

CloudWatch Logs doesn't support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

" }, "DescribeLogStreams":{ "name":"DescribeLogStreams", @@ -1014,7 +1014,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group.

CloudWatch Logs doesn’t immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.

To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven’t been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted.

When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the storedBytes value to see how many bytes a log group is storing.

" + "documentation":"

Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group.

CloudWatch Logs doesn't immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.

To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven't been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted.

When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the storedBytes value to see how many bytes a log group is storing.

" }, "PutSubscriptionFilter":{ "name":"PutSubscriptionFilter", @@ -1093,7 +1093,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

The TagLogGroup operation is on the path to deprecation. We recommend that you use TagResource instead.

Adds or updates the specified tags for the specified log group.

To list the tags for a log group, use ListTagsForResource. To remove tags, use UntagResource.

For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs in the Amazon CloudWatch Logs User Guide.

CloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.

", + "documentation":"

The TagLogGroup operation is on the path to deprecation. We recommend that you use TagResource instead.

Adds or updates the specified tags for the specified log group.

To list the tags for a log group, use ListTagsForResource. To remove tags, use UntagResource.

For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs in the Amazon CloudWatch Logs User Guide.

CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.

", "deprecated":true, "deprecatedMessage":"Please use the generic tagging API TagResource" }, @@ -1136,7 +1136,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

The UntagLogGroup operation is on the path to deprecation. We recommend that you use UntagResource instead.

Removes the specified tags from the specified log group.

To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource.

CloudWatch Logs doesn’t support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys.

", + "documentation":"

The UntagLogGroup operation is on the path to deprecation. We recommend that you use UntagResource instead.

Removes the specified tags from the specified log group.

To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource.

CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys.

", "deprecated":true, "deprecatedMessage":"Please use the generic tagging API UntagResource" }, @@ -1167,7 +1167,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"OperationAbortedException"} ], - "documentation":"

Use this operation to suppress anomaly detection for a specified anomaly or pattern. If you suppress an anomaly, CloudWatch Logs won’t report new occurrences of that anomaly and won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won’t report any anomalies related to that pattern.

You must specify either anomalyId or patternId, but you can't specify both parameters in the same operation.

If you have previously used this operation to suppress detection of a pattern or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. To do this, use this operation and specify the anomaly or pattern to stop suppressing, and omit the suppressionType and suppressionPeriod parameters.

" + "documentation":"

Use this operation to suppress anomaly detection for a specified anomaly or pattern. If you suppress an anomaly, CloudWatch Logs won't report new occurrences of that anomaly and won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won't report any anomalies related to that pattern.

You must specify either anomalyId or patternId, but you can't specify both parameters in the same operation.

If you have previously used this operation to suppress detection of a pattern or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. To do this, use this operation and specify the anomaly or pattern to stop suppressing, and omit the suppressionType and suppressionPeriod parameters.

" }, "UpdateDeliveryConfiguration":{ "name":"UpdateDeliveryConfiguration", @@ -1512,7 +1512,7 @@ }, "defaultDeliveryConfigValues":{ "shape":"ConfigurationTemplateDeliveryConfigValues", - "documentation":"

A mapping that displays the default value of each property within a delivery’s configuration, if it is not specified in the request.

" + "documentation":"

A mapping that displays the default value of each property within a delivery's configuration, if it is not specified in the request.

" }, "allowedFields":{ "shape":"AllowedFields", @@ -1584,7 +1584,7 @@ }, "recordFields":{ "shape":"RecordFields", - "documentation":"

The list of record fields to be delivered to the destination, in order. If the delivery’s log source has mandatory fields, they must be included in this list.

" + "documentation":"

The list of record fields to be delivered to the destination, in order. If the delivery's log source has mandatory fields, they must be included in this list.

" }, "fieldDelimiter":{ "shape":"FieldDelimiter", @@ -1592,7 +1592,7 @@ }, "s3DeliveryConfiguration":{ "shape":"S3DeliveryConfiguration", - "documentation":"

This structure contains parameters that are valid only when the delivery’s delivery destination is an S3 bucket.

" + "documentation":"

This structure contains parameters that are valid only when the delivery's delivery destination is an S3 bucket.

" }, "tags":{ "shape":"Tags", @@ -1644,7 +1644,7 @@ }, "destinationPrefix":{ "shape":"ExportDestinationPrefix", - "documentation":"

The prefix used as the start of the key for every object exported. If you don't specify a value, the default is exportedlogs.

" + "documentation":"

The prefix used as the start of the key for every object exported. If you don't specify a value, the default is exportedlogs.

The length of this parameter must comply with the S3 object key name length limits. The object key name is a sequence of Unicode characters with UTF-8 encoding, and can be up to 1,024 bytes.

" } } }, @@ -1992,7 +1992,7 @@ "documentation":"

The tags that have been assigned to this delivery.

" } }, - "documentation":"

This structure contains information about one delivery in your account.

A delivery is a connection between a logical delivery source and a logical delivery destination.

For more information, see CreateDelivery.

You can't update an existing delivery. You can only create and delete deliveries.

" + "documentation":"

This structure contains information about one delivery in your account.

A delivery is a connection between a logical delivery source and a logical delivery destination.

For more information, see CreateDelivery.

To update an existing delivery configuration, use UpdateDeliveryConfiguration.

" }, "DeliveryDestination":{ "type":"structure", @@ -3244,6 +3244,10 @@ "value":{"shape":"Count"} }, "IncludeLinkedAccounts":{"type":"boolean"}, + "InferredTokenName":{ + "type":"string", + "min":1 + }, "InheritedProperties":{ "type":"list", "member":{"shape":"InheritedProperty"} @@ -3909,6 +3913,10 @@ "enumerations":{ "shape":"Enumerations", "documentation":"

Contains the values found for a dynamic token, and the number of times each value was found.

" + }, + "inferredTokenName":{ + "shape":"InferredTokenName", + "documentation":"

A name that CloudWatch Logs assigned to this dynamic token to make the pattern more readable. The string part of the inferredTokenName gives you a clearer idea of the content of this token. The number part of the inferredTokenName shows where in the pattern this token appears, compared to other dynamic tokens. CloudWatch Logs assigns the string part of the name based on analyzing the content of the log events that contain it.

For example, an inferred token name of IPAddress-3 means that the token represents an IP address, and this token is the third dynamic token in the pattern.

" } }, "documentation":"

A structure that contains information about one pattern token related to an anomaly.

For more information about patterns and tokens, see CreateLogAnomalyDetector.

" @@ -4850,7 +4858,7 @@ }, "limit":{ "shape":"EventsLimit", - "documentation":"

The maximum number of log events to return in the query. If the query string uses the fields command, only the specified fields and their values are returned. The default is 1000.

" + "documentation":"

The maximum number of log events to return in the query. If the query string uses the fields command, only the specified fields and their values are returned. The default is 10,000.

" } } }, @@ -5175,7 +5183,7 @@ }, "recordFields":{ "shape":"RecordFields", - "documentation":"

The list of record fields to be delivered to the destination, in order. If the delivery’s log source has mandatory fields, they must be included in this list.

" + "documentation":"

The list of record fields to be delivered to the destination, in order. If the delivery's log source has mandatory fields, they must be included in this list.

" }, "fieldDelimiter":{ "shape":"FieldDelimiter", @@ -5183,7 +5191,7 @@ }, "s3DeliveryConfiguration":{ "shape":"S3DeliveryConfiguration", - "documentation":"

This structure contains parameters that are valid only when the delivery’s delivery destination is an S3 bucket.

" + "documentation":"

This structure contains parameters that are valid only when the delivery's delivery destination is an S3 bucket.

" } } }, diff --git a/botocore/data/supplychain/2024-01-01/service-2.json b/botocore/data/supplychain/2024-01-01/service-2.json index a344dd6441..142dc13255 100644 --- a/botocore/data/supplychain/2024-01-01/service-2.json +++ b/botocore/data/supplychain/2024-01-01/service-2.json @@ -52,7 +52,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Create DataIntegrationFlow to map one or more different sources to one target using the SQL transformation query.

", + "documentation":"

Enables you to programmatically create a data pipeline that ingests data from source systems, such as Amazon S3 buckets, into a predefined Amazon Web Services Supply Chain dataset (product, inbound_order) or a temporary dataset, using the data transformation query provided with the API.

", "idempotent":true }, "CreateDataLakeDataset":{ @@ -73,7 +73,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Create a data lake dataset.

", + "documentation":"

Enables you to programmatically create an Amazon Web Services Supply Chain data lake dataset. Developers can create the datasets using their pre-defined or custom schema for a given instance ID, namespace, and dataset name.

", "idempotent":true }, "CreateInstance":{ @@ -94,7 +94,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Create a new instance for AWS Supply Chain. This is an asynchronous operation. Upon receiving a CreateInstance request, AWS Supply Chain immediately returns the instance resource, with instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for an instance creation. You can use GetInstance to check the status of the instance.

", + "documentation":"

Enables you to programmatically create an Amazon Web Services Supply Chain instance, supplying the KMS keys and other relevant information for the API, without using the Amazon Web Services console.

This is an asynchronous operation. Upon receiving a CreateInstance request, Amazon Web Services Supply Chain immediately returns the instance resource, instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for the instance. You can use GetInstance to check the status of the instance. If the instance results in an unhealthy state, check the error message, delete the current instance, and create a new one based on the mitigation in the error message.

", "idempotent":true }, "DeleteDataIntegrationFlow":{ @@ -115,7 +115,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Delete the DataIntegrationFlow.

", + "documentation":"

Enables you to programmatically delete an existing data pipeline for the provided Amazon Web Services Supply Chain instance and DataIntegrationFlow name.

", "idempotent":true }, "DeleteDataLakeDataset":{ @@ -136,7 +136,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Delete a data lake dataset.

", + "documentation":"

Enables you to programmatically delete an Amazon Web Services Supply Chain data lake dataset. Developers can delete the existing datasets for a given instance ID, namespace, and dataset name.

", "idempotent":true }, "DeleteInstance":{ @@ -157,7 +157,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Delete the instance. This is an asynchronous operation. Upon receiving a DeleteInstance request, AWS Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status.

", + "documentation":"

Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console.

This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource in the delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status.

", "idempotent":true }, "GetBillOfMaterialsImportJob":{ @@ -198,7 +198,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

View the DataIntegrationFlow details.

" + "documentation":"

Enables you to programmatically view a specific data pipeline for the provided Amazon Web Services Supply Chain instance and DataIntegrationFlow name.

" }, "GetDataLakeDataset":{ "name":"GetDataLakeDataset", @@ -218,7 +218,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Get a data lake dataset.

" + "documentation":"

Enables you to programmatically view an Amazon Web Services Supply Chain data lake dataset. Developers can view the data lake dataset information such as namespace, schema, and so on for a given instance ID, namespace, and dataset name.

" }, "GetInstance":{ "name":"GetInstance", @@ -238,7 +238,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Get the AWS Supply Chain instance details.

" + "documentation":"

Enables you to programmatically retrieve the information related to an Amazon Web Services Supply Chain instance ID.

" }, "ListDataIntegrationFlows":{ "name":"ListDataIntegrationFlows", @@ -258,7 +258,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Lists all the DataIntegrationFlows in a paginated way.

" + "documentation":"

Enables you to programmatically list all data pipelines for the provided Amazon Web Services Supply Chain instance.

" }, "ListDataLakeDatasets":{ "name":"ListDataLakeDatasets", @@ -278,7 +278,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

List the data lake datasets for a specific instance and name space.

" + "documentation":"

Enables you to programmatically view the list of Amazon Web Services Supply Chain data lake datasets. Developers can view the datasets and the corresponding information such as namespace, schema, and so on for a given instance ID and namespace.

" }, "ListInstances":{ "name":"ListInstances", @@ -298,7 +298,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

List all the AWS Supply Chain instances in a paginated way.

" + "documentation":"

List all Amazon Web Services Supply Chain instances for a specific account. Enables you to programmatically list the instances based on their account ID, instance name, and instance state (active or deleted).

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -318,7 +318,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

List all the tags for an Amazon Web ServicesSupply Chain resource.

" + "documentation":"

List all the tags for an Amazon Web Services Supply Chain resource. By listing the tags, developers can view the tag-level information on a resource and perform actions such as deleting a resource associated with a particular tag.

" }, "SendDataIntegrationEvent":{ "name":"SendDataIntegrationEvent", @@ -359,7 +359,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Create tags for an Amazon Web Services Supply chain resource.

" + "documentation":"

You can create tags during or after creating a resource such as an instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can add tags such as dev, test, or prod to data flows created in the AWS Supply Chain datasets. You can use these tags to identify a group of resources or a single resource used by the developer.

" }, "UntagResource":{ "name":"UntagResource", @@ -379,7 +379,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Delete tags for an Amazon Web Services Supply chain resource.

", + "documentation":"

You can delete tags for an Amazon Web Services Supply Chain resource such as an instance, data flow, or dataset. During the data ingestion process, you can delete tags such as dev, test, or prod from data flows created in the AWS Supply Chain datasets.

", "idempotent":true }, "UpdateDataIntegrationFlow":{ @@ -400,7 +400,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Update the DataIntegrationFlow.

" + "documentation":"

Enables you to programmatically update an existing data pipeline to ingest data from source systems, such as Amazon S3 buckets, into a predefined Amazon Web Services Supply Chain dataset (product, inbound_order) or a temporary dataset, using the data transformation query provided with the API.

" }, "UpdateDataLakeDataset":{ "name":"UpdateDataLakeDataset", @@ -420,7 +420,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Update a data lake dataset.

" + "documentation":"

Enables you to programmatically update an Amazon Web Services Supply Chain data lake dataset. Developers can update the description of a data lake dataset for a given instance ID, namespace, and dataset name.

" }, "UpdateInstance":{ "name":"UpdateInstance", @@ -440,7 +440,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

Update the instance.

" + "documentation":"

Enables you to programmatically update an Amazon Web Services Supply Chain instance description by providing all the relevant information, such as account ID and instance ID, without using the AWS console.

" } }, "shapes":{ @@ -1469,6 +1469,10 @@ "shape":"InstanceState", "documentation":"

The state of the instance.

" }, + "errorMessage":{ + "shape":"String", + "documentation":"

The Amazon Web Services Supply Chain instance error message. If the instance results in an unhealthy state, customers need to check the error message, delete the current instance, and create a new one based on the mitigation in the error message.

" + }, "webAppDnsDomain":{ "shape":"InstanceWebAppDnsDomain", "documentation":"

The WebApp DNS domain name of the instance.

" diff --git a/docs/source/conf.py b/docs/source/conf.py index 9701a7df63..c166c8a85f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.48' +release = '1.35.49' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.