From 1b6d6fa9504cd3157e0f8a34a61b9dd1e2a5c6db Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Mon, 14 Oct 2024 18:05:18 +0000 Subject: [PATCH 1/3] Update to latest models --- .../api-change-codepipeline-98058.json | 5 + .../api-change-mailmanager-85745.json | 5 + .../api-change-securitylake-53848.json | 5 + .../api-change-supplychain-68141.json | 5 + .../api-change-transfer-15570.json | 5 + .../codepipeline/2015-07-09/service-2.json | 54 ++- .../mailmanager/2023-10-17/service-2.json | 92 ++++- .../securitylake/2018-05-10/service-2.json | 94 ++--- .../supplychain/2024-01-01/paginators-1.json | 6 + .../supplychain/2024-01-01/service-2.json | 384 ++++++++++++++++++ .../transfer/2018-11-05/paginators-1.json | 6 + .../data/transfer/2018-11-05/service-2.json | 114 +++++- 12 files changed, 717 insertions(+), 58 deletions(-) create mode 100644 .changes/next-release/api-change-codepipeline-98058.json create mode 100644 .changes/next-release/api-change-mailmanager-85745.json create mode 100644 .changes/next-release/api-change-securitylake-53848.json create mode 100644 .changes/next-release/api-change-supplychain-68141.json create mode 100644 .changes/next-release/api-change-transfer-15570.json diff --git a/.changes/next-release/api-change-codepipeline-98058.json b/.changes/next-release/api-change-codepipeline-98058.json new file mode 100644 index 0000000000..ed09441751 --- /dev/null +++ b/.changes/next-release/api-change-codepipeline-98058.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``codepipeline``", + "description": "AWS CodePipeline V2 type pipelines now support automatically retrying failed stages and skipping stage for failed entry conditions." +} diff --git a/.changes/next-release/api-change-mailmanager-85745.json b/.changes/next-release/api-change-mailmanager-85745.json new file mode 100644 index 0000000000..8fd0610c72 --- /dev/null +++ b/.changes/next-release/api-change-mailmanager-85745.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``mailmanager``", + "description": "Mail Manager support for viewing and exporting metadata of archived messages." +} diff --git a/.changes/next-release/api-change-securitylake-53848.json b/.changes/next-release/api-change-securitylake-53848.json new file mode 100644 index 0000000000..8c313b325b --- /dev/null +++ b/.changes/next-release/api-change-securitylake-53848.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``securitylake``", + "description": "This release updates request validation regex for resource ARNs." +} diff --git a/.changes/next-release/api-change-supplychain-68141.json b/.changes/next-release/api-change-supplychain-68141.json new file mode 100644 index 0000000000..d1198fae04 --- /dev/null +++ b/.changes/next-release/api-change-supplychain-68141.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``supplychain``", + "description": "This release adds AWS Supply Chain instance management functionality. Specifically adding CreateInstance, DeleteInstance, GetInstance, ListInstances, and UpdateInstance APIs." 
+} diff --git a/.changes/next-release/api-change-transfer-15570.json b/.changes/next-release/api-change-transfer-15570.json new file mode 100644 index 0000000000..53d73f28ce --- /dev/null +++ b/.changes/next-release/api-change-transfer-15570.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``transfer``", + "description": "This release enables customers using SFTP connectors to query the transfer status of their files to meet their monitoring needs as well as orchestrate post transfer actions." +} diff --git a/botocore/data/codepipeline/2015-07-09/service-2.json b/botocore/data/codepipeline/2015-07-09/service-2.json index 2186431aab..7b919be2ca 100644 --- a/botocore/data/codepipeline/2015-07-09/service-2.json +++ b/botocore/data/codepipeline/2015-07-09/service-2.json @@ -2301,6 +2301,10 @@ "shape":"Result", "documentation":"

The specified result for when the failure conditions are met, such as rolling back the stage.

" }, + "retryConfiguration":{ + "shape":"RetryConfiguration", + "documentation":"

The retry configuration specifies automatic retry for a failed stage, along with the configured retry mode.

" + }, "conditions":{ "shape":"ConditionList", "documentation":"

The conditions that are configured as failure conditions.

" @@ -4192,9 +4196,25 @@ "type":"string", "enum":[ "ROLLBACK", - "FAIL" + "FAIL", + "RETRY", + "SKIP" ] }, + "RetryAttempt":{ + "type":"integer", + "min":1 + }, + "RetryConfiguration":{ + "type":"structure", + "members":{ + "retryMode":{ + "shape":"StageRetryMode", + "documentation":"

The method that you want to configure for automatic stage retry on stage failure. You can specify whether to retry only the failed actions in the stage or all actions in the stage.
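As a hedged illustration, the fragment below sketches how a stage-level onFailure block might enable automatic retry in a V2 pipeline definition passed to update_pipeline; this is a sketch only, and the FAILED_ACTIONS/ALL_ACTIONS values come from the existing StageRetryMode enum:

# Sketch: an onFailure block using the new RETRY result and
# retryConfiguration added in this release.
stage_on_failure = {
    "result": "RETRY",  # Result enum now also allows RETRY and SKIP
    "retryConfiguration": {
        "retryMode": "FAILED_ACTIONS"  # or "ALL_ACTIONS" to rerun the whole stage
    },
}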

" + } + }, + "documentation":"

The retry configuration specifies automatic retry for a failed stage, along with the configured retry mode.

" + }, "RetryStageExecutionInput":{ "type":"structure", "required":[ @@ -4233,6 +4253,31 @@ }, "documentation":"

Represents the output of a RetryStageExecution action.

" }, + "RetryStageMetadata":{ + "type":"structure", + "members":{ + "autoStageRetryAttempt":{ + "shape":"RetryAttempt", + "documentation":"

The number of attempts for a specific stage with automatic retry on stage failure. One attempt is allowed for automatic stage retry on failure.

" + }, + "manualStageRetryAttempt":{ + "shape":"RetryAttempt", + "documentation":"

The number of attempts for a specific stage where manual retries have been made upon stage failure.

" + }, + "latestRetryTrigger":{ + "shape":"RetryTrigger", + "documentation":"

The latest trigger for a specific stage where manual or automatic retries have been made upon stage failure.

" + } + }, + "documentation":"

The details of a specific automatic retry on stage failure, including the attempt number and trigger.
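A minimal sketch, assuming boto3 with this model version, of reading the new retryStageMetadata from GetPipelineState; the pipeline name is hypothetical:

import boto3

codepipeline = boto3.client("codepipeline")
state = codepipeline.get_pipeline_state(name="my-pipeline")  # hypothetical pipeline name
for stage in state["stageStates"]:
    retry = stage.get("retryStageMetadata")  # new field in this release
    if retry:
        print(stage["stageName"],
              retry.get("latestRetryTrigger"),
              retry.get("autoStageRetryAttempt"),
              retry.get("manualStageRetryAttempt"))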

" + }, + "RetryTrigger":{ + "type":"string", + "enum":[ + "AutomatedStageRetry", + "ManualStageRetry" + ] + }, "Revision":{ "type":"string", "max":1500, @@ -4972,7 +5017,8 @@ "Failed", "Stopped", "Stopping", - "Succeeded" + "Succeeded", + "Skipped" ] }, "StageName":{ @@ -5037,6 +5083,10 @@ "onFailureConditionState":{ "shape":"StageConditionState", "documentation":"

The state of the failure conditions for a stage.

" + }, + "retryStageMetadata":{ + "shape":"RetryStageMetadata", + "documentation":"

The details of a specific automatic retry on stage failure, including the attempt number and trigger.

" } }, "documentation":"

Represents information about the state of the stage.

" diff --git a/botocore/data/mailmanager/2023-10-17/service-2.json b/botocore/data/mailmanager/2023-10-17/service-2.json index ddfb826d08..8444470d09 100644 --- a/botocore/data/mailmanager/2023-10-17/service-2.json +++ b/botocore/data/mailmanager/2023-10-17/service-2.json @@ -1027,7 +1027,9 @@ "TO", "FROM", "CC", - "SUBJECT" + "SUBJECT", + "ENVELOPE_TO", + "ENVELOPE_FROM" ] }, "ArchiveStringExpression":{ @@ -1509,6 +1511,24 @@ "type":"list", "member":{"shape":"String"} }, + "Envelope":{ + "type":"structure", + "members":{ + "From":{ + "shape":"String", + "documentation":"

The MAIL FROM given by the host from which the email was received.

" + }, + "Helo":{ + "shape":"String", + "documentation":"

The HELO used by the host from which the email was received.

" + }, + "To":{ + "shape":"StringList", + "documentation":"

All RCPT TO entries given by the host from which the email was received.

" + } + }, + "documentation":"

The SMTP envelope information of the email.

" + }, "ErrorMessage":{"type":"string"}, "ExportDestinationConfiguration":{ "type":"structure", @@ -1719,9 +1739,17 @@ "GetArchiveMessageResponse":{ "type":"structure", "members":{ + "Envelope":{ + "shape":"Envelope", + "documentation":"

The SMTP envelope information of the email.

" + }, "MessageDownloadLink":{ "shape":"S3PresignedURL", "documentation":"

A pre-signed URL to temporarily download the full message content.

" + }, + "Metadata":{ + "shape":"Metadata", + "documentation":"

The metadata about the email.

" } }, "documentation":"

The response containing details about the requested archived email message.
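A hedged sketch of retrieving the new Envelope and Metadata fields with boto3; the archived message ID is hypothetical:

import boto3

mailmanager = boto3.client("mailmanager")
resp = mailmanager.get_archive_message(ArchivedMessageId="example-message-id")  # hypothetical ID
envelope = resp.get("Envelope", {})  # SMTP envelope, new in this release
metadata = resp.get("Metadata", {})  # message metadata, new in this release
print(envelope.get("From"), envelope.get("To"))
print(metadata.get("SenderIpAddress"), metadata.get("IngressPointId"))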

" @@ -2726,6 +2754,44 @@ }, "documentation":"

The textual body content of an email message.

" }, + "Metadata":{ + "type":"structure", + "members":{ + "IngressPointId":{ + "shape":"IngressPointId", + "documentation":"

The ID of the ingress endpoint through which the email was received.

" + }, + "RuleSetId":{ + "shape":"RuleSetId", + "documentation":"

The ID of the rule set that processed the email.

" + }, + "SenderHostname":{ + "shape":"String", + "documentation":"

The name of the host from which the email was received.

" + }, + "SenderIpAddress":{ + "shape":"SenderIpAddress", + "documentation":"

The IP address of the host from which the email was received.

" + }, + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the email was received.

" + }, + "TlsCipherSuite":{ + "shape":"String", + "documentation":"

The TLS cipher suite used to communicate with the host from which the email was received.

" + }, + "TlsProtocol":{ + "shape":"String", + "documentation":"

The TLS protocol used to communicate with the host from which the email was received.

" + }, + "TrafficPolicyId":{ + "shape":"TrafficPolicyId", + "documentation":"

The ID of the traffic policy that was in effect when the email was received.

" + } + }, + "documentation":"

The metadata about the email.

" + }, "MimeHeaderAttribute":{ "type":"string", "pattern":"^X-[a-zA-Z0-9-]{1,256}$" @@ -2950,6 +3016,10 @@ "shape":"String", "documentation":"

The date the email was sent.

" }, + "Envelope":{ + "shape":"Envelope", + "documentation":"

The SMTP envelope information of the email.

" + }, "From":{ "shape":"String", "documentation":"

The email address of the sender.

" @@ -2962,6 +3032,10 @@ "shape":"String", "documentation":"

The email message ID this is a reply to.

" }, + "IngressPointId":{ + "shape":"IngressPointId", + "documentation":"

The ID of the ingress endpoint through which the email was received.

" + }, "MessageId":{ "shape":"String", "documentation":"

The unique message ID of the email.

" @@ -2974,6 +3048,14 @@ "shape":"Timestamp", "documentation":"

The timestamp of when the email was received.

" }, + "SenderHostname":{ + "shape":"String", + "documentation":"

The name of the host from which the email was received.

" + }, + "SenderIpAddress":{ + "shape":"SenderIpAddress", + "documentation":"

The IP address of the host from which the email was received.

" + }, "Subject":{ "shape":"String", "documentation":"

The subject header value of the email.

" @@ -3617,6 +3699,10 @@ }, "documentation":"

Sends the email to the internet using the ses:SendRawEmail API.

" }, + "SenderIpAddress":{ + "type":"string", + "sensitive":true + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -3657,6 +3743,10 @@ "shape":"Timestamp", "documentation":"

The start of the timestamp range to include emails from.

" }, + "IncludeMetadata":{ + "shape":"Boolean", + "documentation":"

Whether to include message metadata as JSON files in the export.
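A sketch, under the assumption that StartArchiveExport accepts the members modeled here, of requesting an export that also emits metadata JSON files; the archive ID and bucket are hypothetical:

import boto3
from datetime import datetime

mailmanager = boto3.client("mailmanager")
mailmanager.start_archive_export(
    ArchiveId="example-archive-id",  # hypothetical archive
    FromTimestamp=datetime(2024, 10, 1),
    ToTimestamp=datetime(2024, 10, 14),
    ExportDestinationConfiguration={
        "S3": {"S3Location": "s3://amzn-s3-demo-bucket/exports/"}  # hypothetical bucket
    },
    IncludeMetadata=True,  # new flag in this release
)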

" + }, "MaxResults":{ "shape":"ExportMaxResults", "documentation":"

The maximum number of email items to include in the export.

" diff --git a/botocore/data/securitylake/2018-05-10/service-2.json b/botocore/data/securitylake/2018-05-10/service-2.json index c1c96cff08..ae0a969910 100644 --- a/botocore/data/securitylake/2018-05-10/service-2.json +++ b/botocore/data/securitylake/2018-05-10/service-2.json @@ -31,7 +31,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Adds a natively supported Amazon Web Service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Service as a source, Security Lake starts collecting logs and events from it.

You can use this API only to enable natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable data collection from a custom source.

" + "documentation":"

Adds a natively supported Amazon Web Services service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Services service as a source, Security Lake starts collecting logs and events from it.

You can use this API only to enable natively supported Amazon Web Services services as a source. Use CreateCustomLogSource to enable data collection from a custom source.

" }, "CreateCustomLogSource":{ "name":"CreateCustomLogSource", @@ -70,7 +70,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations.

When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide.

" + "documentation":"

Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations.

When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call and after you create subscribers using the CreateSubscriber API. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide.

" }, "CreateDataLakeExceptionSubscription":{ "name":"CreateDataLakeExceptionSubscription", @@ -89,7 +89,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates the specified notification subscription in Amazon Security Lake for the organization you specify.

" + "documentation":"

Creates the specified notification subscription in Amazon Security Lake for the organization you specify. The notification subscription is created for exceptions that cannot be resolved by Security Lake automatically.

" }, "CreateDataLakeOrganizationConfiguration":{ "name":"CreateDataLakeOrganizationConfiguration", @@ -127,7 +127,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates a subscription permission for accounts that are already enabled in Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region.

" + "documentation":"

Creates a subscriber for accounts that are already enabled in Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region.

" }, "CreateSubscriberNotification":{ "name":"CreateSubscriberNotification", @@ -165,7 +165,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Removes a natively supported Amazon Web Service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal.

You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts.

" + "documentation":"

Removes a natively supported Amazon Web Services service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal.

You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts.

" }, "DeleteCustomLogSource":{ "name":"DeleteCustomLogSource", @@ -283,7 +283,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes the specified notification subscription in Amazon Security Lake for the organization you specify.

", + "documentation":"

Deletes the specified subscription notification in Amazon Security Lake for the organization you specify.

", "idempotent":true }, "DeregisterDataLakeDelegatedAdministrator":{ @@ -323,7 +323,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the details of exception notifications for the account in Amazon Security Lake.

" + "documentation":"

Retrieves the protocol and endpoint that were provided when subscribing to Amazon SNS topics for exception notifications.

" }, "GetDataLakeOrganizationConfiguration":{ "name":"GetDataLakeOrganizationConfiguration", @@ -437,7 +437,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves the log sources in the current Amazon Web Services Region.

" + "documentation":"

Retrieves the log sources.

" }, "ListSubscribers":{ "name":"ListSubscribers", @@ -456,7 +456,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

List all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account.

" + "documentation":"

Lists all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -553,7 +553,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Specifies where to store your security data and for how long. You can add a rollup Region to consolidate data from multiple Amazon Web Services Regions.

", + "documentation":"

You can use UpdateDataLake to specify where to store your security data, how it should be encrypted at rest and for how long. You can add a Rollup Region to consolidate data from multiple Amazon Web Services Regions, replace default encryption (SSE-S3) with a Customer Managed Key, or specify transition and expiration actions through storage Lifecycle management. The UpdateDataLake API works as an \"upsert\" operation that performs an insert if the specified item or record does not exist, or an update if it already exists. Security Lake securely stores your data at rest using Amazon Web Services encryption solutions. For more details, see Data protection in Amazon Security Lake.

For example, omitting the key encryptionConfiguration from a Region that is included in an update call that currently uses KMS will leave that Region's KMS key in place, but specifying encryptionConfiguration: {kmsKeyId: 'S3_MANAGED_KEY'} for that same Region will reset the key to S3-managed.

For more details about lifecycle management and how to update retention settings for one or more Regions after enabling Security Lake, see the Amazon Security Lake User Guide.
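The key-reset behavior described above can be sketched with boto3 as follows; the Region choice is hypothetical:

import boto3

securitylake = boto3.client("securitylake")
# Per the documentation above, specifying kmsKeyId 'S3_MANAGED_KEY' resets the
# Region to S3-managed (SSE-S3) encryption; omitting encryptionConfiguration
# entirely would leave an existing KMS key in place.
securitylake.update_data_lake(
    configurations=[{
        "region": "us-east-1",  # hypothetical Region
        "encryptionConfiguration": {"kmsKeyId": "S3_MANAGED_KEY"},
    }]
)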

", "idempotent":true }, "UpdateDataLakeExceptionSubscription":{ @@ -653,7 +653,7 @@ "type":"string", "max":1011, "min":1, - "pattern":"^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$" + "pattern":"^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-Za-z0-9_/.\\-]{0,127}$" }, "AwsAccountId":{ "type":"string", @@ -670,14 +670,14 @@ "members":{ "externalId":{ "shape":"ExternalId", - "documentation":"

The external ID used to estalish trust relationship with the AWS identity.

" + "documentation":"

The external ID used to establish a trust relationship with the Amazon Web Services identity.

" }, "principal":{ "shape":"AwsPrincipal", - "documentation":"

The AWS identity principal.

" + "documentation":"

The Amazon Web Services identity principal.

" } }, - "documentation":"

The AWS identity.

" + "documentation":"

The Amazon Web Services identity.

" }, "AwsLogSourceConfiguration":{ "type":"structure", @@ -696,14 +696,14 @@ }, "sourceName":{ "shape":"AwsLogSourceName", - "documentation":"

The name for a Amazon Web Services source. This must be a Regionally unique value.

" + "documentation":"

The name for an Amazon Web Services source.

" }, "sourceVersion":{ "shape":"AwsLogSourceVersion", - "documentation":"

The version for a Amazon Web Services source. This must be a Regionally unique value.

" + "documentation":"

The version for an Amazon Web Services source.

" } }, - "documentation":"

The Security Lake logs source configuration file describes the information needed to generate Security Lake logs.

" + "documentation":"

To add a natively-supported Amazon Web Services service as a log source, use these parameters to specify the configuration settings for the log source.

" }, "AwsLogSourceConfigurationList":{ "type":"list", @@ -798,7 +798,7 @@ "members":{ "failed":{ "shape":"AccountList", - "documentation":"

Lists all accounts in which enabling a natively supported Amazon Web Service as a Security Lake source failed. The failure occurred as these accounts are not part of an organization.

" + "documentation":"

Lists all accounts in which enabling a natively supported Amazon Web Services service as a Security Lake source failed. The failure occurred as these accounts are not part of an organization.

" } } }, @@ -811,7 +811,7 @@ "members":{ "configuration":{ "shape":"CustomLogSourceConfiguration", - "documentation":"

The configuration for the third-party custom source.

" + "documentation":"

The configuration used for the third-party custom source.

" }, "eventClasses":{ "shape":"OcsfEventClassList", @@ -819,7 +819,7 @@ }, "sourceName":{ "shape":"CustomLogSourceName", - "documentation":"

Specify the name for a third-party custom source. This must be a Regionally unique value.

" + "documentation":"

Specify the name for a third-party custom source. This must be a Regionally unique value. The sourceName you enter here is used in the LogProviderRole name, which follows the convention AmazonSecurityLake-Provider-{name of the custom source}-{region}. You must use a CustomLogSource name that is shorter than or equal to 20 characters. This ensures that the LogProviderRole name is below the 64-character limit.

" }, "sourceVersion":{ "shape":"CustomLogSourceVersion", @@ -832,7 +832,7 @@ "members":{ "source":{ "shape":"CustomLogSourceResource", - "documentation":"

The created third-party custom source.

" + "documentation":"

The third-party custom source that was created.

" } } }, @@ -845,7 +845,7 @@ "members":{ "exceptionTimeToLive":{ "shape":"CreateDataLakeExceptionSubscriptionRequestExceptionTimeToLiveLong", - "documentation":"

The expiration period and time-to-live (TTL).

" + "documentation":"

The expiration period and time-to-live (TTL). It is the length of time that the exception message remains.

" }, "notificationEndpoint":{ "shape":"SafeString", @@ -953,7 +953,7 @@ }, "sources":{ "shape":"LogSourceResourceList", - "documentation":"

The supported Amazon Web Services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services.

" + "documentation":"

The supported Amazon Web Services services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services services.

" }, "subscriberDescription":{ "shape":"DescriptionString", @@ -1014,14 +1014,14 @@ "members":{ "crawlerConfiguration":{ "shape":"CustomLogSourceCrawlerConfiguration", - "documentation":"

The configuration for the Glue Crawler for the third-party custom source.

" + "documentation":"

The configuration used for the Glue Crawler for a third-party custom source.

" }, "providerIdentity":{ "shape":"AwsIdentity", "documentation":"

The identity of the log provider for the third-party custom source.

" } }, - "documentation":"

The configuration for the third-party custom source.

" + "documentation":"

The configuration used for the third-party custom source.

" }, "CustomLogSourceCrawlerConfiguration":{ "type":"structure", @@ -1032,7 +1032,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role to be used by the Glue crawler. The recommended IAM policies are:

" } }, - "documentation":"

The configuration for the Glue Crawler for the third-party custom source.

" + "documentation":"

The configuration used for the Glue Crawler for a third-party custom source.

" }, "CustomLogSourceName":{ "type":"string", @@ -1138,7 +1138,7 @@ "members":{ "kmsKeyId":{ "shape":"String", - "documentation":"

The id of KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object.

" + "documentation":"

The identifier of the KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object.

" } }, "documentation":"

Provides encryption details of Amazon Security Lake object.

" @@ -1244,7 +1244,7 @@ "members":{ "createStatus":{ "shape":"DataLakeStatus", - "documentation":"

Retrieves the status of the configuration operation for an account in Amazon Security Lake.

" + "documentation":"

Retrieves the status of the CreateDataLake API call for an account in Amazon Security Lake.

" }, "dataLakeArn":{ "shape":"AmazonResourceName", @@ -1294,14 +1294,14 @@ }, "sourceName":{ "shape":"String", - "documentation":"

The supported Amazon Web Services from which logs and events are collected. Amazon Security Lake supports log and event collection for natively supported Amazon Web Services.

" + "documentation":"

The supported Amazon Web Services services from which logs and events are collected. Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services.

" }, "sourceStatuses":{ "shape":"DataLakeSourceStatusList", "documentation":"

The log status for the Security Lake account.

" } }, - "documentation":"

Amazon Security Lake collects logs and events from supported Amazon Web Services and custom sources. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

" + "documentation":"

Amazon Security Lake collects logs and events from supported Amazon Web Services services and custom sources. For the list of supported Amazon Web Services services, see the Amazon Security Lake User Guide.

" }, "DataLakeSourceList":{ "type":"list", @@ -1512,7 +1512,7 @@ "members":{ "exceptionTimeToLive":{ "shape":"Long", - "documentation":"

The expiration period and time-to-live (TTL).

" + "documentation":"

The expiration period and time-to-live (TTL). It is the length of time that the exception message remains.

" }, "notificationEndpoint":{ "shape":"SafeString", @@ -1534,7 +1534,7 @@ "members":{ "autoEnableNewAccount":{ "shape":"DataLakeAutoEnableNewAccountConfigurationList", - "documentation":"

The configuration for new accounts.

" + "documentation":"

The configuration used for new accounts in Security Lake.

" } } }, @@ -1628,7 +1628,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the EventBridge API destinations IAM role that you created. For more information about ARNs and how to use them in policies, see Managing data access and Amazon Web Services Managed Policies in the Amazon Security Lake User Guide.

" } }, - "documentation":"

The configurations for HTTPS subscriber notification.

" + "documentation":"

The configurations used for HTTPS subscriber notification.

" }, "HttpsNotificationConfigurationEndpointString":{ "type":"string", @@ -1654,11 +1654,11 @@ "members":{ "maxResults":{ "shape":"MaxResults", - "documentation":"

List the maximum number of failures in Security Lake.

" + "documentation":"

Lists the maximum number of failures in Security Lake.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + "documentation":"

Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" }, "regions":{ "shape":"RegionList", @@ -1671,11 +1671,11 @@ "members":{ "exceptions":{ "shape":"DataLakeExceptionList", - "documentation":"

Lists the failures that cannot be retried in the current Region.

" + "documentation":"

Lists the failures that cannot be retried.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + "documentation":"

Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" } } }, @@ -1815,14 +1815,14 @@ "members":{ "awsLogSource":{ "shape":"AwsLogSourceResource", - "documentation":"

Amazon Security Lake supports log and event collection for natively supported Amazon Web Services. For more information, see the Amazon Security Lake User Guide.

" + "documentation":"

Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide.

" }, "customLogSource":{ "shape":"CustomLogSourceResource", "documentation":"

Amazon Security Lake supports custom source types. For more information, see the Amazon Security Lake User Guide.

" } }, - "documentation":"

The supported source types from which logs and events are collected in Amazon Security Lake. For a list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

", + "documentation":"

The supported source types from which logs and events are collected in Amazon Security Lake. For a list of supported Amazon Web Services services, see the Amazon Security Lake User Guide.

", "union":true }, "LogSourceResourceList":{ @@ -1849,7 +1849,7 @@ "members":{ "httpsNotificationConfiguration":{ "shape":"HttpsNotificationConfiguration", - "documentation":"

The configurations for HTTPS subscriber notification.

" + "documentation":"

The configurations used for HTTPS subscriber notification.

" }, "sqsNotificationConfiguration":{ "shape":"SqsNotificationConfiguration", @@ -1943,7 +1943,7 @@ "type":"structure", "members":{ }, - "documentation":"

The configurations for SQS subscriber notification.

" + "documentation":"

The configurations used for EventBridge subscriber notification.

" }, "String":{"type":"string"}, "SubscriberResource":{ @@ -1982,7 +1982,7 @@ }, "sources":{ "shape":"LogSourceResourceList", - "documentation":"

Amazon Security Lake supports log and event collection for natively supported Amazon Web Services. For more information, see the Amazon Security Lake User Guide.

" + "documentation":"

Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide.

" }, "subscriberArn":{ "shape":"AmazonResourceName", @@ -2170,7 +2170,7 @@ "members":{ "exceptionTimeToLive":{ "shape":"UpdateDataLakeExceptionSubscriptionRequestExceptionTimeToLiveLong", - "documentation":"

The time-to-live (TTL) for the exception message to remain.

" + "documentation":"

The time-to-live (TTL) for the exception message, that is, the length of time that the exception message remains.

" }, "notificationEndpoint":{ "shape":"SafeString", @@ -2198,7 +2198,7 @@ "members":{ "configurations":{ "shape":"DataLakeConfigurationList", - "documentation":"

Specify the Region or Regions that will contribute data to the rollup region.

" + "documentation":"

Specifies the Region or Regions that will contribute data to the rollup region.

" }, "metaStoreManagerRoleArn":{ "shape":"RoleArn", @@ -2249,7 +2249,7 @@ "members":{ "sources":{ "shape":"LogSourceResourceList", - "documentation":"

The supported Amazon Web Services from which logs and events are collected. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

" + "documentation":"

The supported Amazon Web Services services from which logs and events are collected. For the list of supported Amazon Web Services services, see the Amazon Security Lake User Guide.

" }, "subscriberDescription":{ "shape":"DescriptionString", @@ -2263,7 +2263,7 @@ }, "subscriberIdentity":{ "shape":"AwsIdentity", - "documentation":"

The AWS identity used to access your data.

" + "documentation":"

The Amazon Web Services identity used to access your data.

" }, "subscriberName":{ "shape":"UpdateSubscriberRequestSubscriberNameString", @@ -2287,5 +2287,5 @@ } } }, - "documentation":"

Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data.

The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data.

Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide.

Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF).

Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics.

" + "documentation":"

Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data.

The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data.

Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide.

Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF).

Other Amazon Web Services services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics.

" } diff --git a/botocore/data/supplychain/2024-01-01/paginators-1.json b/botocore/data/supplychain/2024-01-01/paginators-1.json index b92bd396a2..8ca1db6b54 100644 --- a/botocore/data/supplychain/2024-01-01/paginators-1.json +++ b/botocore/data/supplychain/2024-01-01/paginators-1.json @@ -11,6 +11,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "datasets" + }, + "ListInstances": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "instances" } } } diff --git a/botocore/data/supplychain/2024-01-01/service-2.json b/botocore/data/supplychain/2024-01-01/service-2.json index c9302aac71..a344dd6441 100644 --- a/botocore/data/supplychain/2024-01-01/service-2.json +++ b/botocore/data/supplychain/2024-01-01/service-2.json @@ -76,6 +76,27 @@ "documentation":"

Create a data lake dataset.

", "idempotent":true }, + "CreateInstance":{ + "name":"CreateInstance", + "http":{ + "method":"POST", + "requestUri":"/api/instance", + "responseCode":200 + }, + "input":{"shape":"CreateInstanceRequest"}, + "output":{"shape":"CreateInstanceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Create a new instance for AWS Supply Chain. This is an asynchronous operation. Upon receiving a CreateInstance request, AWS Supply Chain immediately returns the instance resource, with the instance ID and an initializing state, while simultaneously creating all required Amazon Web Services resources for the instance. You can use GetInstance to check the status of the instance.
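A minimal sketch, assuming boto3 with this model version, of creating an instance and polling its state with GetInstance; the instance name is hypothetical:

import time

import boto3

supplychain = boto3.client("supplychain")
instance = supplychain.create_instance(instanceName="demo-instance")["instance"]  # hypothetical name
while instance["state"] == "Initializing":  # creation is asynchronous
    time.sleep(30)
    instance = supplychain.get_instance(instanceId=instance["instanceId"])["instance"]
print(instance["state"], instance.get("webAppDnsDomain"))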

", + "idempotent":true + }, "DeleteDataIntegrationFlow":{ "name":"DeleteDataIntegrationFlow", "http":{ @@ -118,6 +139,27 @@ "documentation":"

Delete a data lake dataset.

", "idempotent":true }, + "DeleteInstance":{ + "name":"DeleteInstance", + "http":{ + "method":"DELETE", + "requestUri":"/api/instance/{instanceId}", + "responseCode":200 + }, + "input":{"shape":"DeleteInstanceRequest"}, + "output":{"shape":"DeleteInstanceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Delete the instance. This is an asynchronous operation. Upon receiving a DeleteInstance request, AWS Supply Chain immediately returns a response with the instance resource in a deleting state, while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status.

", + "idempotent":true + }, "GetBillOfMaterialsImportJob":{ "name":"GetBillOfMaterialsImportJob", "http":{ @@ -178,6 +220,26 @@ ], "documentation":"

Get a data lake dataset.

" }, + "GetInstance":{ + "name":"GetInstance", + "http":{ + "method":"GET", + "requestUri":"/api/instance/{instanceId}", + "responseCode":200 + }, + "input":{"shape":"GetInstanceRequest"}, + "output":{"shape":"GetInstanceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Get the AWS Supply Chain instance details.

" + }, "ListDataIntegrationFlows":{ "name":"ListDataIntegrationFlows", "http":{ @@ -218,6 +280,26 @@ ], "documentation":"

List the data lake datasets for a specific instance and name space.

" }, + "ListInstances":{ + "name":"ListInstances", + "http":{ + "method":"GET", + "requestUri":"/api/instance", + "responseCode":200 + }, + "input":{"shape":"ListInstancesRequest"}, + "output":{"shape":"ListInstancesResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

List all the AWS Supply Chain instances in a paginated way.
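A sketch of paginating through instances using the ListInstances paginator registered in this release; the state filter shown is one of the modeled InstanceState values:

import boto3

supplychain = boto3.client("supplychain")
paginator = supplychain.get_paginator("list_instances")
for page in paginator.paginate(instanceStateFilter=["Active"]):
    for inst in page["instances"]:
        print(inst["instanceId"], inst["state"])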

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -339,6 +421,26 @@ {"shape":"ConflictException"} ], "documentation":"

Update a data lake dataset.

" + }, + "UpdateInstance":{ + "name":"UpdateInstance", + "http":{ + "method":"PATCH", + "requestUri":"/api/instance/{instanceId}", + "responseCode":200 + }, + "input":{"shape":"UpdateInstanceRequest"}, + "output":{"shape":"UpdateInstanceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Update the instance.

" } }, "shapes":{ @@ -360,6 +462,10 @@ "min":20, "pattern":"arn:aws:scn(?::([a-z0-9-]+):([0-9]+):instance)?/([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})[-_./A-Za-z0-9]*" }, + "AwsAccountId":{ + "type":"string", + "pattern":"[0-9]{12}" + }, "BillOfMaterialsImportJob":{ "type":"structure", "required":[ @@ -577,6 +683,44 @@ }, "documentation":"

The response parameters of CreateDataLakeDataset.

" }, + "CreateInstanceRequest":{ + "type":"structure", + "members":{ + "instanceName":{ + "shape":"InstanceName", + "documentation":"

The AWS Supply Chain instance name.

" + }, + "instanceDescription":{ + "shape":"InstanceDescription", + "documentation":"

The AWS Supply Chain instance description.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon Web Services owned KMS key. If you don't provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The Amazon Web Services tags of an instance to be created.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

The client token for idempotency.

", + "idempotencyToken":true + } + }, + "documentation":"

The request parameters for CreateInstance.

" + }, + "CreateInstanceResponse":{ + "type":"structure", + "required":["instance"], + "members":{ + "instance":{ + "shape":"Instance", + "documentation":"

The AWS Supply Chain instance resource data details.

" + } + }, + "documentation":"

The response parameters for CreateInstance.

" + }, "DataIntegrationEventData":{ "type":"string", "max":1048576, @@ -1147,6 +1291,34 @@ }, "documentation":"

The response parameters of DeleteDataLakeDataset.

" }, + "DeleteInstanceRequest":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

The AWS Supply Chain instance identifier.

", + "location":"uri", + "locationName":"instanceId" + } + }, + "documentation":"

The request parameters for DeleteInstance.

" + }, + "DeleteInstanceResponse":{ + "type":"structure", + "required":["instance"], + "members":{ + "instance":{ + "shape":"Instance", + "documentation":"

The AWS Supply Chain instance resource data details.

" + } + }, + "documentation":"

The response parameters for DeleteInstance.

" + }, + "Double":{ + "type":"double", + "box":true + }, "GetBillOfMaterialsImportJobRequest":{ "type":"structure", "required":[ @@ -1253,6 +1425,135 @@ }, "documentation":"

The response parameters for UpdateDataLakeDataset.

" }, + "GetInstanceRequest":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

The AWS Supply Chain instance identifier.

", + "location":"uri", + "locationName":"instanceId" + } + }, + "documentation":"

The request parameters for GetInstance.

" + }, + "GetInstanceResponse":{ + "type":"structure", + "required":["instance"], + "members":{ + "instance":{ + "shape":"Instance", + "documentation":"

The instance resource data details.

" + } + }, + "documentation":"

The response parameters for GetInstance.

" + }, + "Instance":{ + "type":"structure", + "required":[ + "instanceId", + "awsAccountId", + "state" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

The Amazon Web Services Supply Chain instance identifier.

" + }, + "awsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The Amazon Web Services account ID that owns the instance.

" + }, + "state":{ + "shape":"InstanceState", + "documentation":"

The state of the instance.

" + }, + "webAppDnsDomain":{ + "shape":"InstanceWebAppDnsDomain", + "documentation":"

The WebApp DNS domain name of the instance.

" + }, + "createdTime":{ + "shape":"Timestamp", + "documentation":"

The instance creation timestamp.

" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The instance last modified timestamp.

" + }, + "instanceName":{ + "shape":"InstanceName", + "documentation":"

The Amazon Web Services Supply Chain instance name.

" + }, + "instanceDescription":{ + "shape":"InstanceDescription", + "documentation":"

The Amazon Web Services Supply Chain instance description.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you optionally provided for encryption. If you did not provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key and nothing is returned.

" + }, + "versionNumber":{ + "shape":"Double", + "documentation":"

The version number of the instance.

" + } + }, + "documentation":"

The details of the instance.

" + }, + "InstanceDescription":{ + "type":"string", + "max":501, + "min":0, + "pattern":"([a-zA-Z0-9., _ʼ'%-]){0,500}" + }, + "InstanceList":{ + "type":"list", + "member":{"shape":"Instance"} + }, + "InstanceMaxResults":{ + "type":"integer", + "box":true, + "max":20, + "min":0 + }, + "InstanceName":{ + "type":"string", + "max":63, + "min":0, + "pattern":"(?![ _ʼ'%-])[a-zA-Z0-9 _ʼ'%-]{0,62}[a-zA-Z0-9]" + }, + "InstanceNameList":{ + "type":"list", + "member":{"shape":"InstanceName"}, + "max":10, + "min":0 + }, + "InstanceNextToken":{ + "type":"string", + "max":1024, + "min":1 + }, + "InstanceState":{ + "type":"string", + "enum":[ + "Initializing", + "Active", + "CreateFailed", + "DeleteFailed", + "Deleting", + "Deleted" + ] + }, + "InstanceStateList":{ + "type":"list", + "member":{"shape":"InstanceState"}, + "max":6, + "min":0 + }, + "InstanceWebAppDnsDomain":{ + "type":"string", + "pattern":"[A-Za-z0-9]+(.[A-Za-z0-9]+)+" + }, "InternalServerException":{ "type":"structure", "members":{ @@ -1264,6 +1565,12 @@ "fault":true, "retryable":{"throttling":false} }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"arn:[a-z0-9][-.a-z0-9]{0,62}:kms:([a-z0-9][-.a-z0-9]{0,62})?:([a-z0-9][-.a-z0-9]{0,62})?:key/.{0,1019}" + }, "ListDataIntegrationFlowsRequest":{ "type":"structure", "required":["instanceId"], @@ -1353,6 +1660,51 @@ }, "documentation":"

The response parameters of ListDataLakeDatasets.

" }, + "ListInstancesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"InstanceNextToken", + "documentation":"

The pagination token to fetch the next page of instances.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"InstanceMaxResults", + "documentation":"

Specify the maximum number of instances to fetch in this paginated request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "instanceNameFilter":{ + "shape":"InstanceNameList", + "documentation":"

The filter to apply to ListInstances based on instance names.

", + "location":"querystring", + "locationName":"instanceNameFilter" + }, + "instanceStateFilter":{ + "shape":"InstanceStateList", + "documentation":"

The filter to apply to ListInstances based on instance state.

", + "location":"querystring", + "locationName":"instanceStateFilter" + } + }, + "documentation":"

The request parameters for ListInstances.

" + }, + "ListInstancesResponse":{ + "type":"structure", + "required":["instances"], + "members":{ + "instances":{ + "shape":"InstanceList", + "documentation":"

The list of instance resource data details.

" + }, + "nextToken":{ + "shape":"InstanceNextToken", + "documentation":"

The pagination token to fetch the next page of instances.

" + } + }, + "documentation":"

The response parameters for ListInstances.

" + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -1648,6 +2000,38 @@ }, "documentation":"

The response parameters of UpdateDataLakeDataset.

" }, + "UpdateInstanceRequest":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

The AWS Supply Chain instance identifier.

", + "location":"uri", + "locationName":"instanceId" + }, + "instanceName":{ + "shape":"InstanceName", + "documentation":"

The AWS Supply Chain instance name.

" + }, + "instanceDescription":{ + "shape":"InstanceDescription", + "documentation":"

The AWS Supply Chain instance description.

" + } + }, + "documentation":"

The request parameters for UpdateInstance.

" + }, + "UpdateInstanceResponse":{ + "type":"structure", + "required":["instance"], + "members":{ + "instance":{ + "shape":"Instance", + "documentation":"

The instance resource data details.

" + } + }, + "documentation":"

The response parameters for UpdateInstance.

" + }, "ValidationException":{ "type":"structure", "members":{ diff --git a/botocore/data/transfer/2018-11-05/paginators-1.json b/botocore/data/transfer/2018-11-05/paginators-1.json index 3fe23dc9f4..2ae66f4d15 100644 --- a/botocore/data/transfer/2018-11-05/paginators-1.json +++ b/botocore/data/transfer/2018-11-05/paginators-1.json @@ -77,6 +77,12 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "Profiles" + }, + "ListFileTransferResults": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FileTransferResults" } } } diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index 3422efdcb6..fb236196fe 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -2,6 +2,7 @@ "version":"2.0", "metadata":{ "apiVersion":"2018-11-05", + "auth":["aws.auth#sigv4"], "endpointPrefix":"transfer", "jsonVersion":"1.1", "protocol":"json", @@ -614,6 +615,22 @@ ], "documentation":"

Lists all in-progress executions for the specified workflow.

If the specified workflow ID cannot be found, ListExecutions returns a ResourceNotFound exception.

" }, + "ListFileTransferResults":{ + "name":"ListFileTransferResults", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFileTransferResultsRequest"}, + "output":{"shape":"ListFileTransferResultsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceError"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Returns real-time updates and detailed information on the status of each individual file being transferred in a specific file transfer operation. You specify the file transfer by providing its ConnectorId and its TransferId.

File transfer results are available up to 7 days after an operation has been requested.
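A hedged sketch of pairing StartFileTransfer with the new ListFileTransferResults paginator; the connector ID and file path are hypothetical:

import boto3

transfer = boto3.client("transfer")
connector_id = "c-1234567890abcdef0"  # hypothetical SFTP connector
started = transfer.start_file_transfer(
    ConnectorId=connector_id,
    RetrieveFilePaths=["/inbox/report.csv"],  # hypothetical remote path
)
paginator = transfer.get_paginator("list_file_transfer_results")
for page in paginator.paginate(ConnectorId=connector_id, TransferId=started["TransferId"]):
    for result in page["FileTransferResults"]:
        print(result["FilePath"], result["StatusCode"], result.get("FailureCode"))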

" + }, "ListHostKeys":{ "name":"ListHostKeys", "http":{ @@ -1195,6 +1212,38 @@ "documentation":"

This exception is thrown when the UpdateServer is called for a file transfer protocol-enabled server that has VPC as the endpoint type and the server's VpcEndpointID is not in the available state.

", "exception":true }, + "ConnectorFileTransferResult":{ + "type":"structure", + "required":[ + "FilePath", + "StatusCode" + ], + "members":{ + "FilePath":{ + "shape":"FilePath", + "documentation":"

The filename and path to where the file was sent to or retrieved from.

" + }, + "StatusCode":{ + "shape":"TransferTableStatus", + "documentation":"

The current status for the transfer.

" + }, + "FailureCode":{ + "shape":"FailureCode", + "documentation":"

For transfers that fail, this parameter contains a code indicating the reason; for example, RETRIEVE_FILE_NOT_FOUND.

" + }, + "FailureMessage":{ + "shape":"Message", + "documentation":"

For transfers that fail, this parameter describes the reason for the failure.

" + } + }, + "documentation":"

A structure that contains the details for files transferred using an SFTP connector during a single transfer.
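
Since StatusCode takes one of the TransferTableStatus values defined later in this hunk (QUEUED, IN_PROGRESS, COMPLETED, FAILED), a consumer can branch on it. A sketch, assuming results were gathered as in the paginator example above:

    def report(results):
        # results: ConnectorFileTransferResult dicts from ListFileTransferResults
        for r in results:
            status = r["StatusCode"]
            if status == "FAILED":
                # FailureCode and FailureMessage are only populated on failure.
                print(f"{r['FilePath']}: {r.get('FailureCode')} ({r.get('FailureMessage')})")
            elif status in ("QUEUED", "IN_PROGRESS"):
                print(f"{r['FilePath']}: {status}, poll again")
            else:  # COMPLETED
                print(f"{r['FilePath']}: completed")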

" + }, + "ConnectorFileTransferResults":{ + "type":"list", + "member":{"shape":"ConnectorFileTransferResult"}, + "max":1000, + "min":0 + }, "ConnectorId":{ "type":"string", "max":19, @@ -1313,7 +1362,7 @@ }, "BaseDirectory":{ "shape":"HomeDirectory", - "documentation":"

The landing directory (folder) for files transferred by using the AS2 protocol.

A BaseDirectory example is /DOC-EXAMPLE-BUCKET/home/mydirectory.

" + "documentation":"

The landing directory (folder) for files transferred by using the AS2 protocol.

A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory.

" }, "AccessRole":{ "shape":"Role", @@ -2701,7 +2750,7 @@ "documentation":"

A list of security group IDs that are available to attach to your server's endpoint.

This property can only be set when EndpointType is set to VPC.

You can edit the SecurityGroupIds property in the UpdateServer API only if you are changing the EndpointType from PUBLIC or VPC_ENDPOINT to VPC. To change security groups associated with your server's VPC endpoint after creation, use the Amazon EC2 ModifyVpcEndpoint API.
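
As the note above says, post-creation security-group changes go through EC2 rather than Transfer Family. A minimal sketch with placeholder IDs:

    import boto3

    ec2 = boto3.client("ec2")
    # Hypothetical IDs; read the server's VpcEndpointId from DescribeServer first.
    ec2.modify_vpc_endpoint(
        VpcEndpointId="vpce-0123456789abcdef0",
        AddSecurityGroupIds=["sg-0123456789abcdef0"],
        RemoveSecurityGroupIds=["sg-0fedcba9876543210"],
    )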

" } }, - "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP address to your server's endpoint.

After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.

For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.

" + "documentation":"

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP address to your server's endpoint.

After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.

For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" }, "EndpointType":{ "type":"string", @@ -2802,6 +2851,7 @@ "min":1, "pattern":"S-1-[\\d-]+" }, + "FailureCode":{"type":"string"}, "FileLocation":{ "type":"structure", "members":{ @@ -3279,6 +3329,45 @@ } } }, + "ListFileTransferResultsRequest":{ + "type":"structure", + "required":[ + "ConnectorId", + "TransferId" + ], + "members":{ + "ConnectorId":{ + "shape":"ConnectorId", + "documentation":"

A unique identifier for a connector. This value should match the value supplied to the corresponding StartFileTransfer call.

" + }, + "TransferId":{ + "shape":"TransferId", + "documentation":"

A unique identifier for a file transfer. This value should match the value supplied to the corresponding StartFileTransfer call.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are more file details than returned in this call, use this value for a subsequent call to ListFileTransferResults to retrieve them.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of files to return in a single page. Note that currently you can specify a maximum of 10 file paths in a single StartFileTransfer operation. Thus, the maximum number of file transfer results that can be returned in a single page is 10.

" + } + } + }, + "ListFileTransferResultsResponse":{ + "type":"structure", + "required":["FileTransferResults"], + "members":{ + "FileTransferResults":{ + "shape":"ConnectorFileTransferResults", + "documentation":"

Returns the details for the files transferred in the transfer identified by the specified TransferId and ConnectorId.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

Returns a token that you can use to call ListFileTransferResults again and receive additional results, if there are any (against the same TransferId).
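
For callers not using the registered paginator, this token contract supports a plain loop. A sketch reusing the Transfer Family client from the earlier examples (IDs remain hypothetical):

    kwargs = {
        "ConnectorId": "c-1234567890abcdef0",
        "TransferId": "transfer-id-from-start-file-transfer",
        "MaxResults": 10,
    }
    results = []
    while True:
        page = client.list_file_transfer_results(**kwargs)
        results.extend(page["FileTransferResults"])
        token = page.get("NextToken")
        if not token:
            break
        kwargs["NextToken"] = token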

" + } + } + }, "ListHostKeysRequest":{ "type":"structure", "required":["ServerId"], @@ -4485,7 +4574,7 @@ }, "SendFilePaths":{ "shape":"FilePaths", - "documentation":"

One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt .

Replace DOC-EXAMPLE-BUCKET with one of your actual buckets.

" + "documentation":"

One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, amzn-s3-demo-bucket/myfile.txt.

Replace amzn-s3-demo-bucket with one of your actual buckets.
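
The TransferId that ListFileTransferResults consumes is returned by this operation, so monitoring starts here. A sketch using the demo bucket naming above and a hypothetical connector ID:

    import boto3

    client = boto3.client("transfer")
    started = client.start_file_transfer(
        ConnectorId="c-1234567890abcdef0",
        SendFilePaths=["amzn-s3-demo-bucket/myfile.txt"],
    )
    # Keep the ID: transfer results stay queryable for up to 7 days.
    transfer_id = started["TransferId"]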

" }, "RetrieveFilePaths":{ "shape":"FilePaths", @@ -4739,6 +4828,15 @@ "min":1, "pattern":"[0-9a-zA-Z./-]+" }, + "TransferTableStatus":{ + "type":"string", + "enum":[ + "QUEUED", + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -4844,7 +4942,7 @@ }, "BaseDirectory":{ "shape":"HomeDirectory", - "documentation":"

To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /DOC-EXAMPLE-BUCKET/home/mydirectory .

" + "documentation":"

To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory.

" }, "AccessRole":{ "shape":"Role", @@ -5019,7 +5117,7 @@ }, "EndpointType":{ "shape":"EndpointType", - "documentation":"

The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.

After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.

For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" + "documentation":"

The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.

After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.

For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.
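
Following that recommendation, a sketch of moving a server to the VPC endpoint type, reusing the Transfer Family client from earlier; every ID is a placeholder, and the VPC and subnets must already exist:

    # The server typically must be stopped before its endpoint details change.
    client.update_server(
        ServerId="s-01234567890abcdef",
        EndpointType="VPC",
        EndpointDetails={
            "VpcId": "vpc-0123456789abcdef0",
            "SubnetIds": ["subnet-0123456789abcdef0"],
            # Optionally bind up to three Elastic IPs via AddressAllocationIds.
            "AddressAllocationIds": ["eipalloc-0123456789abcdef0"],
        },
    )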

" }, "HostKey":{ "shape":"HostKey", @@ -5215,11 +5313,11 @@ "members":{ "OnUpload":{ "shape":"OnUploadWorkflowDetails", - "documentation":"

A trigger that starts a workflow: the workflow begins to execute after a file is uploaded.

To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.

aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'

" + "documentation":"

A trigger that starts a workflow: the workflow begins to execute after a file is uploaded.

To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.

aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'

OnUpload can contain a maximum of one WorkflowDetail object.
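
The CLI call above has a straightforward boto3 equivalent. A sketch that attaches a single workflow (the workflow ID and role ARN are placeholders); passing an empty OnUpload list instead detaches it, exactly as the CLI example does:

    client.update_server(
        ServerId="s-01234567890abcdef",
        WorkflowDetails={
            "OnUpload": [
                {
                    "WorkflowId": "w-0123456789abcdef0",
                    "ExecutionRole": "arn:aws:iam::111122223333:role/transfer-workflow",
                }
            ],
            # "OnPartialUpload" takes the same shape and the same one-item limit.
        },
    )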

" }, "OnPartialUpload":{ "shape":"OnPartialUploadWorkflowDetails", - "documentation":"

A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload.

A partial upload occurs when a file is open when the session disconnects.

" + "documentation":"

A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload.

A partial upload occurs when a file is open when the session disconnects.

OnPartialUpload can contain a maximum of one WorkflowDetail object.

" } }, "documentation":"

Container for the WorkflowDetail data type. It is used by actions that trigger a workflow to begin execution.

" @@ -5283,5 +5381,5 @@ "min":0 } }, - "documentation":"

Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up.

" + "documentation":"

Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up.

" } From d7b78f059c43c3afaed3884ffa9b448379d1e4d5 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Mon, 14 Oct 2024 18:05:19 +0000 Subject: [PATCH 2/3] Update endpoints model --- botocore/data/endpoints.json | 91 ++++++++++++++++++++++++++++++------ 1 file changed, 77 insertions(+), 14 deletions(-) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 696bf8d1b3..d05ebee7cd 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -2055,20 +2055,76 @@ "protocols" : [ "https" ] }, "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-2" : { } + "ap-northeast-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + } } }, "arc-zonal-shift" : { @@ -2576,6 +2632,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -28092,6 +28149,12 @@ } } }, + "schemas" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "us-gov-east-1" : { From de00677506377f7ab89f67d43e81c0fe7e7d0ae7 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Mon, 14 Oct 2024 18:06:24 +0000 Subject: [PATCH 3/3] Bumping version to 1.35.40 --- .changes/1.35.40.json | 27 +++++++++++++++++++ .../api-change-codepipeline-98058.json | 5 ---- .../api-change-mailmanager-85745.json | 5 ---- .../api-change-securitylake-53848.json | 5 ---- .../api-change-supplychain-68141.json | 5 ---- .../api-change-transfer-15570.json | 5 ---- CHANGELOG.rst | 10 +++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 9 files changed, 39 insertions(+), 27 deletions(-) create mode 100644 .changes/1.35.40.json delete mode 100644 .changes/next-release/api-change-codepipeline-98058.json delete mode 100644 .changes/next-release/api-change-mailmanager-85745.json delete mode 100644 .changes/next-release/api-change-securitylake-53848.json delete mode 100644 .changes/next-release/api-change-supplychain-68141.json delete mode 100644 .changes/next-release/api-change-transfer-15570.json diff --git a/.changes/1.35.40.json b/.changes/1.35.40.json new file mode 100644 index 0000000000..5bcd920b80 --- /dev/null +++ b/.changes/1.35.40.json @@ -0,0 +1,27 @@ +[ + { + 
"category": "``codepipeline``", + "description": "AWS CodePipeline V2 type pipelines now support automatically retrying failed stages and skipping stage for failed entry conditions.", + "type": "api-change" + }, + { + "category": "``mailmanager``", + "description": "Mail Manager support for viewing and exporting metadata of archived messages.", + "type": "api-change" + }, + { + "category": "``securitylake``", + "description": "This release updates request validation regex for resource ARNs.", + "type": "api-change" + }, + { + "category": "``supplychain``", + "description": "This release adds AWS Supply Chain instance management functionality. Specifically adding CreateInstance, DeleteInstance, GetInstance, ListInstances, and UpdateInstance APIs.", + "type": "api-change" + }, + { + "category": "``transfer``", + "description": "This release enables customers using SFTP connectors to query the transfer status of their files to meet their monitoring needs as well as orchestrate post transfer actions.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-codepipeline-98058.json b/.changes/next-release/api-change-codepipeline-98058.json deleted file mode 100644 index ed09441751..0000000000 --- a/.changes/next-release/api-change-codepipeline-98058.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``codepipeline``", - "description": "AWS CodePipeline V2 type pipelines now support automatically retrying failed stages and skipping stage for failed entry conditions." -} diff --git a/.changes/next-release/api-change-mailmanager-85745.json b/.changes/next-release/api-change-mailmanager-85745.json deleted file mode 100644 index 8fd0610c72..0000000000 --- a/.changes/next-release/api-change-mailmanager-85745.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``mailmanager``", - "description": "Mail Manager support for viewing and exporting metadata of archived messages." -} diff --git a/.changes/next-release/api-change-securitylake-53848.json b/.changes/next-release/api-change-securitylake-53848.json deleted file mode 100644 index 8c313b325b..0000000000 --- a/.changes/next-release/api-change-securitylake-53848.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``securitylake``", - "description": "This release updates request validation regex for resource ARNs." -} diff --git a/.changes/next-release/api-change-supplychain-68141.json b/.changes/next-release/api-change-supplychain-68141.json deleted file mode 100644 index d1198fae04..0000000000 --- a/.changes/next-release/api-change-supplychain-68141.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``supplychain``", - "description": "This release adds AWS Supply Chain instance management functionality. Specifically adding CreateInstance, DeleteInstance, GetInstance, ListInstances, and UpdateInstance APIs." -} diff --git a/.changes/next-release/api-change-transfer-15570.json b/.changes/next-release/api-change-transfer-15570.json deleted file mode 100644 index 53d73f28ce..0000000000 --- a/.changes/next-release/api-change-transfer-15570.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``transfer``", - "description": "This release enables customers using SFTP connectors to query the transfer status of their files to meet their monitoring needs as well as orchestrate post transfer actions." 
-} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f6e102778b..04a9bae090 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,16 @@ CHANGELOG ========= +1.35.40 +======= + +* api-change:``codepipeline``: AWS CodePipeline V2 type pipelines now support automatically retrying failed stages and skipping stage for failed entry conditions. +* api-change:``mailmanager``: Mail Manager support for viewing and exporting metadata of archived messages. +* api-change:``securitylake``: This release updates request validation regex for resource ARNs. +* api-change:``supplychain``: This release adds AWS Supply Chain instance management functionality. Specifically adding CreateInstance, DeleteInstance, GetInstance, ListInstances, and UpdateInstance APIs. +* api-change:``transfer``: This release enables customers using SFTP connectors to query the transfer status of their files to meet their monitoring needs as well as orchestrate post transfer actions. + + 1.35.39 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 964feba740..eed4a57bea 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.39' +__version__ = '1.35.40' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 855ad6365f..549cb4e6a2 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.39' +release = '1.35.40' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.