diff --git a/clients/client-cloudwatch-logs/src/commands/CreateDeliveryCommand.ts b/clients/client-cloudwatch-logs/src/commands/CreateDeliveryCommand.ts
index 0d6f88c5ce82..0340564d416e 100644
--- a/clients/client-cloudwatch-logs/src/commands/CreateDeliveryCommand.ts
+++ b/clients/client-cloudwatch-logs/src/commands/CreateDeliveryCommand.ts
@@ -35,7 +35,7 @@ export interface CreateDeliveryCommandOutput extends CreateDeliveryResponse, __M
 * Enabling
 * logging from Amazon Web Services services.
 *
- *A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.
+ *A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
*To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
 *A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination.
- * The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ * The destination can be CloudWatch Logs, Amazon S3, or Firehose.
 * Only some Amazon Web Services services support being configured as a delivery source. These services are listed
 * in Enable logging from Amazon Web Services
 * services.
diff --git a/clients/client-cloudwatch-logs/src/commands/GetDeliveryCommand.ts b/clients/client-cloudwatch-logs/src/commands/GetDeliveryCommand.ts
index 00d9592b66fd..568802f80b4b 100644
--- a/clients/client-cloudwatch-logs/src/commands/GetDeliveryCommand.ts
+++ b/clients/client-cloudwatch-logs/src/commands/GetDeliveryCommand.ts
@@ -35,7 +35,7 @@ export interface GetDeliveryCommandOutput extends GetDeliveryResponse, __Metadat
 * delivery destination
 * .
*A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination.
- * The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ * The destination can be CloudWatch Logs, Amazon S3, or Firehose.
* Only some Amazon Web Services services support being configured as a delivery source. These services are listed
* in Enable logging from Amazon Web Services
* services.
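
For reference, a minimal sketch of the source → destination → delivery pairing that CreateDelivery and the PutDelivery* commands above describe, using this client. The names, ARNs, and region below are placeholders, not values taken from this change:

```ts
import {
  CloudWatchLogsClient,
  PutDeliverySourceCommand,
  PutDeliveryDestinationCommand,
  CreateDeliveryCommand,
} from "@aws-sdk/client-cloudwatch-logs";

const client = new CloudWatchLogsClient({ region: "us-east-1" });

// 1. Register the resource that emits the logs as a delivery source.
await client.send(
  new PutDeliverySourceCommand({
    name: "my-delivery-source", // placeholder
    resourceArn: "arn:aws:codewhisperer:us-east-1:111122223333:profile/EXAMPLE", // placeholder ARN of the emitting resource
    logType: "EVENT_LOGS",
  })
);

// 2. Register where the logs should land (log group, S3 bucket, or Firehose delivery stream).
const { deliveryDestination } = await client.send(
  new PutDeliveryDestinationCommand({
    name: "my-delivery-destination", // placeholder
    deliveryDestinationConfiguration: {
      destinationResourceArn: "arn:aws:logs:us-east-1:111122223333:log-group:my-log-group", // placeholder
    },
  })
);

if (!deliveryDestination?.arn) {
  throw new Error("PutDeliveryDestination returned no ARN");
}

// 3. Pair exactly one delivery source with one delivery destination to start the flow.
await client.send(
  new CreateDeliveryCommand({
    deliverySourceName: "my-delivery-source",
    deliveryDestinationArn: deliveryDestination.arn,
  })
);
```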
diff --git a/clients/client-cloudwatch-logs/src/commands/ListAnomaliesCommand.ts b/clients/client-cloudwatch-logs/src/commands/ListAnomaliesCommand.ts
index 7626e8210bb2..ad1190381149 100644
--- a/clients/client-cloudwatch-logs/src/commands/ListAnomaliesCommand.ts
+++ b/clients/client-cloudwatch-logs/src/commands/ListAnomaliesCommand.ts
@@ -61,7 +61,10 @@ export interface ListAnomaliesCommandOutput extends ListAnomaliesResponse, __Met
* // "
 A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.
 * Account-level subscription filter policies apply to both existing log groups and log groups that are created later in
- * this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and
+ * this account. Supported destinations are Kinesis Data Streams, Firehose, and
 * Lambda. When log events are sent to the receiving service, they are Base64 encoded and
 * compressed with the GZIP format.
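
For reference, a minimal PutAccountPolicy call that sets up such an account-level subscription filter for one of the destination types listed below. The ARNs are placeholders, and the SUBSCRIPTION_FILTER_POLICY policy type value is assumed from this description rather than shown in this hunk:

```ts
import { CloudWatchLogsClient, PutAccountPolicyCommand } from "@aws-sdk/client-cloudwatch-logs";

const client = new CloudWatchLogsClient({});

// Account-level subscription filter: matching log events from log groups in the
// account are forwarded to the named destination (a Kinesis Data Streams stream here).
await client.send(
  new PutAccountPolicyCommand({
    policyName: "account-subscription-filter", // placeholder
    policyType: "SUBSCRIPTION_FILTER_POLICY",  // assumed enum value for subscription filter policies
    policyDocument: JSON.stringify({
      DestinationArn: "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream", // placeholder
      RoleArn: "arn:aws:iam::111122223333:role/CWLtoKinesisRole",                     // placeholder
      FilterPattern: "ERROR",
      Distribution: "Random",
    }),
  })
);
```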
*The following destinations are supported for subscription filters:
@@ -74,14 +74,14 @@ export interface PutAccountPolicyCommandOutput extends PutAccountPolicyResponse,
 *An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
- *An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
+ *An Firehose data stream in the same account as the subscription policy, for same-account delivery.
*A Lambda function in the same account as the subscription policy, for same-account delivery.
 *A logical destination in a different account created with PutDestination, for cross-account
- * delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
+ * delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
 *Each account can have one account-level subscription filter policy.
diff --git a/clients/client-cloudwatch-logs/src/commands/PutDeliveryDestinationCommand.ts b/clients/client-cloudwatch-logs/src/commands/PutDeliveryDestinationCommand.ts
index 3d77d9244d74..242e1ec62ea9 100644
--- a/clients/client-cloudwatch-logs/src/commands/PutDeliveryDestinationCommand.ts
+++ b/clients/client-cloudwatch-logs/src/commands/PutDeliveryDestinationCommand.ts
@@ -29,7 +29,7 @@ export interface PutDeliveryDestinationCommandOutput extends PutDeliveryDestinat
 /**
 *
 Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an
 * Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and
- * Kinesis Data Firehose are supported as logs delivery destinations.
+ * Firehose are supported as logs delivery destinations.
 *To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
 *Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an
- * logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ * logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.
 *To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:
 *A logical destination created with PutDestination that belongs to a different account, for cross-account delivery.
- * We currently support Kinesis Data Streams and Kinesis Data Firehose as logical destinations.
+ * We currently support Kinesis Data Streams and Firehose as logical destinations.
 *An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as
diff --git a/clients/client-cloudwatch-logs/src/models/models_0.ts b/clients/client-cloudwatch-logs/src/models/models_0.ts
index bc829d29d557..1838d72434b4 100644
--- a/clients/client-cloudwatch-logs/src/models/models_0.ts
+++ b/clients/client-cloudwatch-logs/src/models/models_0.ts
@@ -99,6 +99,25 @@ export interface AccountPolicy {
   accountId?: string;
 }

+/**
+ *
This structure contains the information for one sample log event that is associated
+ * with an anomaly found by a log anomaly detector.
+ * @public
+ */
+export interface LogEvent {
+  /**
+   *The time stamp of the log event.
+   * @public
+   */
+  timestamp?: number;
+
+  /**
+   *The message content of the log event.
+   * @public
+   */
+  message?: string;
+}
+
 /**
 *A tructures that contains information about one pattern token related to
 * an anomaly.
@@ -247,7 +266,7 @@ export interface Anomaly {
 *An array of sample log event messages that are considered to be part of this anomaly.
 * @public
 */
-  logSamples: string[] | undefined;
+  logSamples: LogEvent[] | undefined;

 /**
 *An array of structures where each structure contains information about one token that makes up the pattern.
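
Because logSamples changes from string[] to LogEvent[], callers that treated each sample as a plain string now read its timestamp and message fields. A minimal consumer sketch (the empty ListAnomaliesCommand input is illustrative):

```ts
import { CloudWatchLogsClient, ListAnomaliesCommand } from "@aws-sdk/client-cloudwatch-logs";

const client = new CloudWatchLogsClient({});

// Each sample is now a LogEvent object rather than a bare string.
const { anomalies } = await client.send(new ListAnomaliesCommand({}));
for (const anomaly of anomalies ?? []) {
  for (const sample of anomaly.logSamples ?? []) {
    console.log(new Date(sample.timestamp ?? 0).toISOString(), sample.message);
  }
}
```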
@@ -654,7 +673,7 @@ export interface Delivery {
   deliveryDestinationArn?: string;

 /**
- *Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ *Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Firehose.
 * @public
 */
  deliveryDestinationType?: DeliveryDestinationType;
@@ -1262,7 +1281,7 @@ export interface DeleteSubscriptionFilterRequest {
 export interface DeliveryDestinationConfiguration {
 /**
 *The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination
- * can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.
+ * can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
 * @public
 */
  destinationResourceArn: string | undefined;
@@ -1288,7 +1307,7 @@ export type OutputFormat = (typeof OutputFormat)[keyof typeof OutputFormat];
 /**
 *This structure contains information about one delivery destination in your account.
 * A delivery destination is an Amazon Web Services resource that represents an
- * Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, are supported as Kinesis Data Firehose delivery destinations.
+ * Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, are supported as Firehose delivery destinations.
 *To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
- *Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ *Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Firehose.
 * @public
 */
  deliveryDestinationType?: DeliveryDestinationType;
@@ -1357,7 +1376,7 @@ export interface DeliveryDestination {
 /**
 *This structure contains information about one delivery source in your account.
 * A delivery source is an Amazon Web Services resource that sends logs to an
- * Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ * Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.
 *Only some Amazon Web Services services support being configured as a delivery source. These services are listed
* as Supported [V2 Permissions] in the table at
* Enabling
@@ -3928,7 +3947,7 @@ export interface PutAccountPolicyRequest {
 * sensitive data terms. This Audit action must contain a FindingsDestination
 * object. You can optionally use that FindingsDestination object to list one or more
 * destinations to send audit findings to. If you specify destinations such as log groups,
- * Kinesis Data Firehose streams, and S3 buckets, they must already exist.
 The second block must include both a DataIdentifer array and an
@@ -3965,14 +3984,14 @@ export interface PutAccountPolicyRequest {
 *An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
- *An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
+ *An Firehose data stream in the same account as the subscription policy, for same-account delivery.
*A Lambda function in the same account as the subscription policy, for same-account delivery.
 *A logical destination in a different account created with PutDestination, for cross-account
- * delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
+ * delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
 * Audit action must contain a FindingsDestination
 * object. You can optionally use that FindingsDestination object to list one or more
 * destinations to send audit findings to. If you specify destinations such as log groups,
- * Kinesis Data Firehose streams, and S3 buckets, they must already exist.
+ * Firehose streams, and S3 buckets, they must already exist.
 * The second block must include both a DataIdentifer array and an
resourceArn: string | undefined;
/**
- *
Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is + *
Defines the type of log that the source is sending.
+ *For Amazon CodeWhisperer, the valid value is
* EVENT_LOGS
.
For IAM Identity Centerr, the valid value is
+ * ERROR_LOGS
.
For Amazon WorkMail, the valid values are
+ * ACCESS_CONTROL_LOGS
, AUTHENTICATION_LOGS
, WORKMAIL_AVAILABILITY_PROVIDER_LOGS
, and WORKMAIL_MAILBOX_ACCESS_LOGS
.
The log events that are too new.
+ *The index of the first log event that is too new. This field is inclusive.
* @public */ tooNewLogEventStartIndex?: number; /** - *The log events that are dated too far in the past.
+ *The index of the last log event that is too old. This field is exclusive.
* @public */ tooOldLogEventEndIndex?: number; diff --git a/clients/client-cloudwatch-logs/src/protocols/Aws_json1_1.ts b/clients/client-cloudwatch-logs/src/protocols/Aws_json1_1.ts index 2dd3a5bf0713..5f4177ad848f 100644 --- a/clients/client-cloudwatch-logs/src/protocols/Aws_json1_1.ts +++ b/clients/client-cloudwatch-logs/src/protocols/Aws_json1_1.ts @@ -3479,6 +3479,8 @@ const de_GetQueryResultsResponse = (output: any, context: __SerdeContext): GetQu // de_LiveTailSessionUpdate omitted. +// de_LogEvent omitted. + // de_LogGroup omitted. // de_LogGroupArnList omitted. diff --git a/codegen/sdk-codegen/aws-models/cloudwatch-logs.json b/codegen/sdk-codegen/aws-models/cloudwatch-logs.json index 8c0f17448693..ff44e31cb709 100644 --- a/codegen/sdk-codegen/aws-models/cloudwatch-logs.json +++ b/codegen/sdk-codegen/aws-models/cloudwatch-logs.json @@ -586,7 +586,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a delivery. A delivery is a connection between a logical delivery source and a logical\n delivery destination\n that you have already created.
\nOnly some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n
\nA delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.
\nTo configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
\nCreate a delivery source, which is a logical object that represents the resource that is actually\n sending the logs. For more \n information, see PutDeliverySource.
\nCreate a delivery destination, which is a logical object that represents the actual\n delivery destination. For more \n information, see PutDeliveryDestination.
\nIf you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nUse CreateDelivery
to create a delivery by pairing exactly one delivery source and one delivery destination.\n
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
\nYou can't update an existing delivery. You can only create and delete deliveries.
" + "smithy.api#documentation": "Creates a delivery. A delivery is a connection between a logical delivery source and a logical\n delivery destination\n that you have already created.
\nOnly some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n
\nA delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
\nTo configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
\nCreate a delivery source, which is a logical object that represents the resource that is actually\n sending the logs. For more \n information, see PutDeliverySource.
\nCreate a delivery destination, which is a logical object that represents the actual\n delivery destination. For more \n information, see PutDeliveryDestination.
\nIf you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nUse CreateDelivery
to create a delivery by pairing exactly one delivery source and one delivery destination.\n
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
\nYou can't update an existing delivery. You can only create and delete deliveries.
" } }, "com.amazonaws.cloudwatchlogs#CreateDeliveryRequest": { @@ -1696,7 +1696,7 @@ "deliveryDestinationType": { "target": "com.amazonaws.cloudwatchlogs#DeliveryDestinationType", "traits": { - "smithy.api#documentation": "Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
" + "smithy.api#documentation": "Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Firehose.
" } }, "tags": { @@ -1728,7 +1728,7 @@ "deliveryDestinationType": { "target": "com.amazonaws.cloudwatchlogs#DeliveryDestinationType", "traits": { - "smithy.api#documentation": "Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
" + "smithy.api#documentation": "Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Firehose.
" } }, "outputFormat": { @@ -1751,7 +1751,7 @@ } }, "traits": { - "smithy.api#documentation": "This structure contains information about one delivery destination in your account. \n A delivery destination is an Amazon Web Services resource that represents an \n Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, are supported as Kinesis Data Firehose delivery destinations.
\nTo configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
\nCreate a delivery source, which is a logical object that represents the resource that is actually\n sending the logs. For more \n information, see PutDeliverySource.
\nCreate a delivery destination, which is a logical object that represents the actual\n delivery destination.
\nIf you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nCreate a delivery by pairing exactly one delivery source and one delivery destination.\n For more information, see CreateDelivery.
\nYou can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
" + "smithy.api#documentation": "This structure contains information about one delivery destination in your account. \n A delivery destination is an Amazon Web Services resource that represents an \n Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, are supported as Firehose delivery destinations.
\nTo configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
\nCreate a delivery source, which is a logical object that represents the resource that is actually\n sending the logs. For more \n information, see PutDeliverySource.
\nCreate a delivery destination, which is a logical object that represents the actual\n delivery destination.
\nIf you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nCreate a delivery by pairing exactly one delivery source and one delivery destination.\n For more information, see CreateDelivery.
\nYou can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
" } }, "com.amazonaws.cloudwatchlogs#DeliveryDestinationConfiguration": { @@ -1760,7 +1760,7 @@ "destinationResourceArn": { "target": "com.amazonaws.cloudwatchlogs#Arn", "traits": { - "smithy.api#documentation": "The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination\n can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.
", + "smithy.api#documentation": "The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination\n can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
", "smithy.api#required": {} } } @@ -1868,7 +1868,7 @@ } }, "traits": { - "smithy.api#documentation": "This structure contains information about one delivery source in your account. \n A delivery source is an Amazon Web Services resource that sends logs to an\n Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
\nOnly some Amazon Web Services services support being configured as a delivery source. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n
\nTo configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
\nCreate a delivery source, which is a logical object that represents the resource that is actually\n sending the logs. For more \n information, see PutDeliverySource.
\nCreate a delivery destination, which is a logical object that represents the actual\n delivery destination. For more \n information, see PutDeliveryDestination.
\nIf you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nCreate a delivery by pairing exactly one delivery source and one delivery destination.\n For more information, see CreateDelivery.
\nYou can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
" + "smithy.api#documentation": "This structure contains information about one delivery source in your account. \n A delivery source is an Amazon Web Services resource that sends logs to an\n Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.
\nOnly some Amazon Web Services services support being configured as a delivery source. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n
\nTo configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
\nCreate a delivery source, which is a logical object that represents the resource that is actually\n sending the logs. For more \n information, see PutDeliverySource.
\nCreate a delivery destination, which is a logical object that represents the actual\n delivery destination. For more \n information, see PutDeliveryDestination.
\nIf you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nCreate a delivery by pairing exactly one delivery source and one delivery destination.\n For more information, see CreateDelivery.
\nYou can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
" } }, "com.amazonaws.cloudwatchlogs#DeliverySourceName": { @@ -1980,7 +1980,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves a list of the deliveries that have been created in the account.
\nA delivery is a \n connection between a \n delivery source\n and a \n \n delivery destination\n .
\nA delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. \n The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. \n Only some Amazon Web Services services support being configured as a delivery source. These services are listed\n in Enable logging from Amazon Web Services \n services.\n
", + "smithy.api#documentation": "Retrieves a list of the deliveries that have been created in the account.
\nA delivery is a \n connection between a \n delivery source\n and a \n \n delivery destination\n .
\nA delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. \n The destination can be CloudWatch Logs, Amazon S3, or Firehose. \n Only some Amazon Web Services services support being configured as a delivery source. These services are listed\n in Enable logging from Amazon Web Services \n services.\n
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -3618,7 +3618,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns complete information about one logical delivery. A delivery is a \n connection between a \n delivery source\n and a \n \n delivery destination\n .
\nA delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. \n The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. \n Only some Amazon Web Services services support being configured as a delivery source. These services are listed\n in Enable logging from Amazon Web Services \n services.\n
\nYou need to specify the delivery id
in this operation. You can find the IDs of the deliveries in your account with the \n DescribeDeliveries operation.
Returns complete information about one logical delivery. A delivery is a \n connection between a \n delivery source\n and a \n \n delivery destination\n .
\nA delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. \n The destination can be CloudWatch Logs, Amazon S3, or Firehose. \n Only some Amazon Web Services services support being configured as a delivery source. These services are listed\n in Enable logging from Amazon Web Services \n services.\n
\nYou need to specify the delivery id
in this operation. You can find the IDs of the deliveries in your account with the \n DescribeDeliveries operation.
The time stamp of the log event.
" + } + }, + "message": { + "target": "com.amazonaws.cloudwatchlogs#EventMessage", + "traits": { + "smithy.api#documentation": "The message content of the log event.
" + } } + }, + "traits": { + "smithy.api#documentation": "This structure contains the information for one sample log event that is associated \n with an anomaly found by a log anomaly detector.
" } }, "com.amazonaws.cloudwatchlogs#LogEventIndex": { @@ -6796,7 +6808,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an account-level data protection policy or subscription filter policy that applies to all log groups \n or a subset of log groups in the account.
\n\n Data protection policy\n
\nA data protection policy can help safeguard sensitive \n data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only\n one account-level data protection policy.
\nSensitive data is detected and masked when it is ingested into a log group. When you set a \n data protection policy, log events ingested into the log groups before that time are not masked.
\nIf you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups\n and all log groups that are created later in this account. The account-level policy is applied to existing log groups\n with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.\n A user who has the logs:Unmask
permission can use a \n GetLogEvents or \n FilterLogEvents\n operation with the unmask
parameter set to true
to view the unmasked \n log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs\n console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see\n Protect sensitive log data with masking.
\nTo use the PutAccountPolicy
operation for a data protection policy, you must be signed on with \n the logs:PutDataProtectionPolicy
\n and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use \n PutDataProtectionPolicy\n to create a data protection policy that applies to just one log group. \n If a log group has its own data protection policy and \n the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.
\n Subscription filter policy\n
\nA subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.\n Account-level subscription filter policies apply to both existing log groups and log groups that are created later in \n this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and \n Lambda. When log events are sent to the receiving service, they are Base64 encoded and \n compressed with the GZIP format.
\nThe following destinations are supported for subscription filters:
\nAn Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
\nAn Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
\nA Lambda function in the same account as the subscription policy, for same-account delivery.
\nA logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
\nEach account can have one account-level subscription filter policy. \n If you are updating an existing filter, you must specify the correct name in PolicyName
.\n To perform a PutAccountPolicy
subscription filter operation for any destination except a Lambda \n function, you must also have the iam:PassRole
permission.
Creates an account-level data protection policy or subscription filter policy that applies to all log groups \n or a subset of log groups in the account.
\n\n Data protection policy\n
\nA data protection policy can help safeguard sensitive \n data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only\n one account-level data protection policy.
\nSensitive data is detected and masked when it is ingested into a log group. When you set a \n data protection policy, log events ingested into the log groups before that time are not masked.
\nIf you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups\n and all log groups that are created later in this account. The account-level policy is applied to existing log groups\n with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.\n A user who has the logs:Unmask
permission can use a \n GetLogEvents or \n FilterLogEvents\n operation with the unmask
parameter set to true
to view the unmasked \n log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs\n console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see\n Protect sensitive log data with masking.
\nTo use the PutAccountPolicy
operation for a data protection policy, you must be signed on with \n the logs:PutDataProtectionPolicy
\n and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use \n PutDataProtectionPolicy\n to create a data protection policy that applies to just one log group. \n If a log group has its own data protection policy and \n the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.
\n Subscription filter policy\n
\nA subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.\n Account-level subscription filter policies apply to both existing log groups and log groups that are created later in \n this account. Supported destinations are Kinesis Data Streams, Firehose, and \n Lambda. When log events are sent to the receiving service, they are Base64 encoded and \n compressed with the GZIP format.
\nThe following destinations are supported for subscription filters:
\nAn Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
\nAn Firehose data stream in the same account as the subscription policy, for same-account delivery.
\nA Lambda function in the same account as the subscription policy, for same-account delivery.
\nA logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
\nEach account can have one account-level subscription filter policy. \n If you are updating an existing filter, you must specify the correct name in PolicyName
.\n To perform a PutAccountPolicy
subscription filter operation for any destination except a Lambda \n function, you must also have the iam:PassRole
permission.
Specify the policy, in JSON.
\n\n Data protection policy\n
\nA data protection policy must include two JSON blocks:
\nThe first block must include both a DataIdentifer
array and an \n Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that\n you want to mask. For more information about the available options, see \n Types of data that you can mask.
The Operation
property with an Audit
action is required to find the \n sensitive data terms. This Audit
action must contain a FindingsDestination
\n object. You can optionally use that FindingsDestination
object to list one or more \n destinations to send audit findings to. If you specify destinations such as log groups, \n Kinesis Data Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an\n Operation
property with an Deidentify
action. The\n DataIdentifer
array must exactly match the DataIdentifer
array\n in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the \n data, and it must \n contain the \n \"MaskConfig\": {}
object. The \n \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
\nThe contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
,\n Description
, and Version
fields. The Name
is different than the \n operation's policyName
parameter, and is used as a dimension when\n CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters long.
\n Subscription filter policy\n
\nA subscription filter policy can include the following attributes in a JSON block:
\n\n DestinationArn The ARN of the destination\n to deliver log events to. Supported destinations are:
\nAn Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
\nAn Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
\nA Lambda function in the same account as the subscription policy, for same-account delivery.
\nA logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
\n\n RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log\n events to the destination stream. You don't need to provide the ARN when you are working with\n a logical destination for cross-account delivery.
\n\n FilterPattern A filter pattern for subscribing to a \n filtered stream of log events.
\n\n DistributionThe method used to distribute log data to the destination. \n By default, log data is\n grouped by log stream, but the grouping can be set to Random
for a more even distribution.\n This property is only applicable when the destination is an Kinesis Data Streams data stream.
Specify the policy, in JSON.
\n\n Data protection policy\n
\nA data protection policy must include two JSON blocks:
\nThe first block must include both a DataIdentifer
array and an \n Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that\n you want to mask. For more information about the available options, see \n Types of data that you can mask.
The Operation
property with an Audit
action is required to find the \n sensitive data terms. This Audit
action must contain a FindingsDestination
\n object. You can optionally use that FindingsDestination
object to list one or more \n destinations to send audit findings to. If you specify destinations such as log groups, \n Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an\n Operation
property with an Deidentify
action. The\n DataIdentifer
array must exactly match the DataIdentifer
array\n in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the \n data, and it must \n contain the \n \"MaskConfig\": {}
object. The \n \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
\nThe contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
,\n Description
, and Version
fields. The Name
is different than the \n operation's policyName
parameter, and is used as a dimension when\n CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters long.
\n Subscription filter policy\n
\nA subscription filter policy can include the following attributes in a JSON block:
\n\n DestinationArn The ARN of the destination\n to deliver log events to. Supported destinations are:
\nAn Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
\nAn Firehose data stream in the same account as the subscription policy, for same-account delivery.
\nA Lambda function in the same account as the subscription policy, for same-account delivery.
\nA logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
\n\n RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log\n events to the destination stream. You don't need to provide the ARN when you are working with\n a logical destination for cross-account delivery.
\n\n FilterPattern A filter pattern for subscribing to a \n filtered stream of log events.
\n\n DistributionThe method used to distribute log data to the destination. \n By default, log data is\n grouped by log stream, but the grouping can be set to Random
for a more even distribution.\n This property is only applicable when the destination is an Kinesis Data Streams data stream.
Specify the data protection policy, in JSON.
\nThis policy must include two JSON blocks:
\nThe first block must include both a DataIdentifer
array and an \n Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that\n you want to mask. For more information about the available options, see \n Types of data that you can mask.
The Operation
property with an Audit
action is required to find the \n sensitive data terms. This Audit
action must contain a FindingsDestination
\n object. You can optionally use that FindingsDestination
object to list one or more \n destinations to send audit findings to. If you specify destinations such as log groups, \n Kinesis Data Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an\n Operation
property with an Deidentify
action. The\n DataIdentifer
array must exactly match the DataIdentifer
array\n in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the \n data, and it must \n contain the \n \"MaskConfig\": {}
object. The \n \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
\nThe contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
,\n Description
, and Version
fields. The Name
is used as a dimension when\n CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters.
Specify the data protection policy, in JSON.
\nThis policy must include two JSON blocks:
\nThe first block must include both a DataIdentifer
array and an \n Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that\n you want to mask. For more information about the available options, see \n Types of data that you can mask.
The Operation
property with an Audit
action is required to find the \n sensitive data terms. This Audit
action must contain a FindingsDestination
\n object. You can optionally use that FindingsDestination
object to list one or more \n destinations to send audit findings to. If you specify destinations such as log groups, \n Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an\n Operation
property with an Deidentify
action. The\n DataIdentifer
array must exactly match the DataIdentifer
array\n in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the \n data, and it must \n contain the \n \"MaskConfig\": {}
object. The \n \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
\nThe contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
,\n Description
, and Version
fields. The Name
is used as a dimension when\n CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters.
Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an \n Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and\n Kinesis Data Firehose are supported as logs delivery destinations.
\nTo configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
\nCreate a delivery source, which is a logical object that represents the resource that is actually\n sending the logs. For more \n information, see PutDeliverySource.
\nUse PutDeliveryDestination
to create a delivery destination, which is a logical object that represents the actual\n delivery destination.
If you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nUse CreateDelivery
to create a delivery by pairing exactly \n one delivery source and one delivery destination. For more \n information, see CreateDelivery.\n
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
\nOnly some Amazon Web Services services support being configured as a delivery source. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n
\nIf you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten\n with the new parameter values that you specify.
" + "smithy.api#documentation": "Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an \n Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and\n Firehose are supported as logs delivery destinations.
\nTo configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
\nCreate a delivery source, which is a logical object that represents the resource that is actually\n sending the logs. For more \n information, see PutDeliverySource.
\nUse PutDeliveryDestination
to create a delivery destination, which is a logical object that represents the actual\n delivery destination.
If you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nUse CreateDelivery
to create a delivery by pairing exactly \n one delivery source and one delivery destination. For more \n information, see CreateDelivery.\n
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
\nOnly some Amazon Web Services services support being configured as a delivery source. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n
\nIf you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten\n with the new parameter values that you specify.
" } }, "com.amazonaws.cloudwatchlogs#PutDeliveryDestinationPolicy": { @@ -7102,7 +7114,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an\n logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
\nTo configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:
\nUse PutDeliverySource
to create a delivery source, which is a logical object that represents the resource that is actually\n sending the logs.
Use PutDeliveryDestination
to create a delivery destination, which is a logical object that represents the actual\n delivery destination. For more \n information, see PutDeliveryDestination.
If you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nUse CreateDelivery
to create a delivery by pairing exactly \n one delivery source and one delivery destination. For more \n information, see CreateDelivery.\n
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
\nOnly some Amazon Web Services services support being configured as a delivery source. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n
\nIf you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten\n with the new parameter values that you specify.
" + "smithy.api#documentation": "Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an\n logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.
\nTo configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:
\nUse PutDeliverySource
to create a delivery source, which is a logical object that represents the resource that is actually\n sending the logs.
Use PutDeliveryDestination
to create a delivery destination, which is a logical object that represents the actual\n delivery destination. For more \n information, see PutDeliveryDestination.
If you are delivering logs cross-account, you must use \n PutDeliveryDestinationPolicy\n in the destination account to assign an IAM policy to the \n destination. This policy allows delivery to that destination.\n
\nUse CreateDelivery
to create a delivery by pairing exactly \n one delivery source and one delivery destination. For more \n information, see CreateDelivery.\n
You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You \n can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
\nOnly some Amazon Web Services services support being configured as a delivery source. These services are listed\n as Supported [V2 Permissions] in the table at \n Enabling \n logging from Amazon Web Services services.\n
\nIf you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten\n with the new parameter values that you specify.
" } }, "com.amazonaws.cloudwatchlogs#PutDeliverySourceRequest": { @@ -7125,7 +7137,7 @@ "logType": { "target": "com.amazonaws.cloudwatchlogs#LogType", "traits": { - "smithy.api#documentation": "Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is \n EVENT_LOGS
.
Defines the type of log that the source is sending.
\nFor Amazon CodeWhisperer, the valid value is \n EVENT_LOGS
.
For IAM Identity Centerr, the valid value is \n ERROR_LOGS
.
For Amazon WorkMail, the valid values are \n ACCESS_CONTROL_LOGS
, AUTHENTICATION_LOGS
, WORKMAIL_AVAILABILITY_PROVIDER_LOGS
, and WORKMAIL_MAILBOX_ACCESS_LOGS
.
Creates or updates a subscription filter and associates it with the specified log\n group. With subscription filters, you can subscribe to a real-time stream of log events\n ingested through PutLogEvents\n and have them delivered to a specific destination. When log events are sent to the receiving\n service, they are Base64 encoded and compressed with the GZIP format.
\nThe following destinations are supported for subscription filters:
\nAn Amazon Kinesis data stream belonging to the same account as the subscription\n filter, for same-account delivery.
\nA logical destination created with PutDestination that belongs to a different account, for cross-account delivery.\n We currently support Kinesis Data Streams and Kinesis Data Firehose as logical destinations.
\nAn Amazon Kinesis Data Firehose delivery stream that belongs to the same account as\n the subscription filter, for same-account delivery.
\nAn Lambda function that belongs to the same account as the\n subscription filter, for same-account delivery.
\nEach log group can have up to two subscription filters associated with it. If you are\n updating an existing filter, you must specify the correct name in filterName
.\n
To perform a PutSubscriptionFilter
operation for any destination except a Lambda function, \n you must also have the \n iam:PassRole
permission.
Creates or updates a subscription filter and associates it with the specified log\n group. With subscription filters, you can subscribe to a real-time stream of log events\n ingested through PutLogEvents\n and have them delivered to a specific destination. When log events are sent to the receiving\n service, they are Base64 encoded and compressed with the GZIP format.
\nThe following destinations are supported for subscription filters:
\nAn Amazon Kinesis data stream belonging to the same account as the subscription\n filter, for same-account delivery.
\nA logical destination created with PutDestination that belongs to a different account, for cross-account delivery.\n We currently support Kinesis Data Streams and Firehose as logical destinations.
\nAn Amazon Kinesis Data Firehose delivery stream that belongs to the same account as\n the subscription filter, for same-account delivery.
\nAn Lambda function that belongs to the same account as the\n subscription filter, for same-account delivery.
\nEach log group can have up to two subscription filters associated with it. If you are\n updating an existing filter, you must specify the correct name in filterName
.\n
To perform a PutSubscriptionFilter
operation for any destination except a Lambda function, \n you must also have the \n iam:PassRole
permission.
The log events that are too new.
" + "smithy.api#documentation": "The index of the first log event that is too new. This field is inclusive.
" } }, "tooOldLogEventEndIndex": { "target": "com.amazonaws.cloudwatchlogs#LogEventIndex", "traits": { - "smithy.api#documentation": "The log events that are dated too far in the past.
" + "smithy.api#documentation": "The index of the last log event that is too old. This field is exclusive.
" } }, "expiredLogEventEndIndex": {