diff --git a/clients/client-cloudwatch-logs/src/commands/PutAccountPolicyCommand.ts b/clients/client-cloudwatch-logs/src/commands/PutAccountPolicyCommand.ts index c72737c1c5c4..c03d760e829f 100644 --- a/clients/client-cloudwatch-logs/src/commands/PutAccountPolicyCommand.ts +++ b/clients/client-cloudwatch-logs/src/commands/PutAccountPolicyCommand.ts @@ -85,7 +85,7 @@ export interface PutAccountPolicyCommandOutput extends PutAccountPolicyResponse, * delivery. Kinesis Data Streams and Firehose are supported as logical destinations.

* * - *

Each account can have one account-level subscription filter policy. + *

Each account can have one account-level subscription filter policy per Region. * If you are updating an existing filter, you must specify the correct name in PolicyName. * To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda * function, you must also have the iam:PassRole permission.
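As a rough illustration of the account-level subscription filter policy described above (one policy per Region, and iam:PassRole for any non-Lambda destination), the sketch below shows how such a policy might be created with this client. The region, ARNs, and policy name are placeholder assumptions, not values taken from this change.

```ts
import {
  CloudWatchLogsClient,
  PutAccountPolicyCommand,
} from "@aws-sdk/client-cloudwatch-logs";

async function putAccountSubscriptionFilterPolicy(): Promise<void> {
  const client = new CloudWatchLogsClient({ region: "us-east-1" }); // placeholder region

  // Subscription filter policy attributes as described in the PutAccountPolicy docs;
  // both ARNs are placeholders for resources that must already exist.
  const policyDocument = JSON.stringify({
    DestinationArn: "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream",
    RoleArn: "arn:aws:iam::123456789012:role/example-cwl-delivery-role", // caller needs iam:PassRole on this role
    FilterPattern: "ERROR",
    Distribution: "Random",
  });

  await client.send(
    new PutAccountPolicyCommand({
      policyName: "AccountSubscriptionFilter", // must match the existing name when updating a policy
      policyType: "SUBSCRIPTION_FILTER_POLICY",
      policyDocument,
      scope: "ALL",
    })
  );
}

putAccountSubscriptionFilterPolicy().catch(console.error);
```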

diff --git a/clients/client-cloudwatch-logs/src/commands/PutLogEventsCommand.ts b/clients/client-cloudwatch-logs/src/commands/PutLogEventsCommand.ts index 74d7072017dd..cc2b58d859b9 100644 --- a/clients/client-cloudwatch-logs/src/commands/PutLogEventsCommand.ts +++ b/clients/client-cloudwatch-logs/src/commands/PutLogEventsCommand.ts @@ -93,6 +93,14 @@ export interface PutLogEventsCommandOutput extends PutLogEventsResponse, __Metad * }, * ], * sequenceToken: "STRING_VALUE", + * entity: { // Entity + * keyAttributes: { // EntityKeyAttributes + * "": "STRING_VALUE", + * }, + * attributes: { // EntityAttributes + * "": "STRING_VALUE", + * }, + * }, * }; * const command = new PutLogEventsCommand(input); * const response = await client.send(command); @@ -103,6 +111,9 @@ export interface PutLogEventsCommandOutput extends PutLogEventsResponse, __Metad * // tooOldLogEventEndIndex: Number("int"), * // expiredLogEventEndIndex: Number("int"), * // }, + * // rejectedEntityInfo: { // RejectedEntityInfo + * // errorType: "InvalidEntity" || "InvalidTypeValue" || "InvalidKeyAttributes" || "InvalidAttributes" || "EntitySizeTooLarge" || "UnsupportedLogGroupType" || "MissingRequiredFields", // required + * // }, * // }; * * ``` diff --git a/clients/client-cloudwatch-logs/src/commands/PutMetricFilterCommand.ts b/clients/client-cloudwatch-logs/src/commands/PutMetricFilterCommand.ts index 17eb3faeebfa..29d9366bd42c 100644 --- a/clients/client-cloudwatch-logs/src/commands/PutMetricFilterCommand.ts +++ b/clients/client-cloudwatch-logs/src/commands/PutMetricFilterCommand.ts @@ -33,6 +33,12 @@ export interface PutMetricFilterCommandOutput extends __MetadataBearer {} * through PutLogEvents.

*

The maximum number of metric filters that can be associated with a log group is * 100.

+ *

Using regular expressions to create metric filters is supported. For these filters, + * there is a quota of two regular expression patterns within a single filter pattern. There + * is also a quota of five regular expression patterns per log group. + * For more information about using regular expressions in metric filters, + * see + * Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

*

When you create a metric filter, you can also optionally assign a unit and dimensions * to the metric that is created.

* diff --git a/clients/client-cloudwatch-logs/src/commands/PutSubscriptionFilterCommand.ts b/clients/client-cloudwatch-logs/src/commands/PutSubscriptionFilterCommand.ts index 8b6dfbf92ffe..5e22b01467cd 100644 --- a/clients/client-cloudwatch-logs/src/commands/PutSubscriptionFilterCommand.ts +++ b/clients/client-cloudwatch-logs/src/commands/PutSubscriptionFilterCommand.ts @@ -55,6 +55,12 @@ export interface PutSubscriptionFilterCommandOutput extends __MetadataBearer {} *

Each log group can have up to two subscription filters associated with it. If you are * updating an existing filter, you must specify the correct name in filterName. *

+ *

Using regular expressions to create subscription filters is supported. For these filters, + * there is a quota of two regular expression patterns within a single filter pattern. There + * is also a quota of five regular expression patterns per log group. + * For more information about using regular expressions in subscription filters, + * see + * Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

*

To perform a PutSubscriptionFilter operation for any destination except a Lambda function, * you must also have the * iam:PassRole permission.
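To make the regular-expression quota noted above concrete (at most two regex patterns in a single filter pattern, five per log group), here is a minimal sketch of a PutSubscriptionFilter call. The log group, filter name, destination ARN, role ARN, and the %...% pattern are illustrative assumptions only.

```ts
import {
  CloudWatchLogsClient,
  PutSubscriptionFilterCommand,
} from "@aws-sdk/client-cloudwatch-logs";

async function putRegexSubscriptionFilter(): Promise<void> {
  const client = new CloudWatchLogsClient({ region: "us-east-1" }); // placeholder region

  await client.send(
    new PutSubscriptionFilterCommand({
      logGroupName: "/example/app-logs", // placeholder
      filterName: "error-regex-filter", // must match the existing name when updating a filter
      filterPattern: "%status: (4|5)\\d{2}%", // one regex pattern; at most two are allowed per filter pattern
      destinationArn: "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream", // placeholder
      roleArn: "arn:aws:iam::123456789012:role/example-cwl-delivery-role", // placeholder; caller needs iam:PassRole
    })
  );
}

putRegexSubscriptionFilter().catch(console.error);
```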

diff --git a/clients/client-cloudwatch-logs/src/commands/StartLiveTailCommand.ts b/clients/client-cloudwatch-logs/src/commands/StartLiveTailCommand.ts index 2b2b81f83baf..da33d3eae952 100644 --- a/clients/client-cloudwatch-logs/src/commands/StartLiveTailCommand.ts +++ b/clients/client-cloudwatch-logs/src/commands/StartLiveTailCommand.ts @@ -58,11 +58,11 @@ export interface StartLiveTailCommandOutput extends StartLiveTailResponse, __Met * which it starts dropping the oldest events.

* *
  • - *

    A SessionStreamingException + *

    A SessionStreamingException * object is returned if an unknown error occurs on the server side.

    *
  • *
  • - *

    A SessionTimeoutException + *

    A SessionTimeoutException * object is returned when the session times out, after it has been kept open for three hours.

    *
  • * diff --git a/clients/client-cloudwatch-logs/src/models/models_0.ts b/clients/client-cloudwatch-logs/src/models/models_0.ts index 1838d72434b4..45f33a4deeb8 100644 --- a/clients/client-cloudwatch-logs/src/models/models_0.ts +++ b/clients/client-cloudwatch-logs/src/models/models_0.ts @@ -119,7 +119,7 @@ export interface LogEvent { } /** - *

    A tructures that contains information about one pattern token related to + *

    A structure that contains information about one pattern token related to * an anomaly.

    *

    For more information about patterns and tokens, see CreateLogAnomalyDetector. *

    @@ -2844,6 +2844,43 @@ export interface DisassociateKmsKeyRequest { resourceIdentifier?: string; } +/** + *

    Reserved for future use.

    + * @public + */ +export interface Entity { + /** + *

    Reserved for future use.

    + * @public + */ + keyAttributes?: Record; + + /** + *

    Reserved for future use.

    + * @public + */ + attributes?: Record; +} + +/** + * @public + * @enum + */ +export const EntityRejectionErrorType = { + ENTITY_SIZE_TOO_LARGE: "EntitySizeTooLarge", + INVALID_ATTRIBUTES: "InvalidAttributes", + INVALID_ENTITY: "InvalidEntity", + INVALID_KEY_ATTRIBUTE: "InvalidKeyAttributes", + INVALID_TYPE_VALUE: "InvalidTypeValue", + MISSING_REQUIRED_FIELDS: "MissingRequiredFields", + UNSUPPORTED_LOG_GROUP_TYPE: "UnsupportedLogGroupType", +} as const; + +/** + * @public + */ +export type EntityRejectionErrorType = (typeof EntityRejectionErrorType)[keyof typeof EntityRejectionErrorType]; + /** *

    Represents a matched event.

    * @public @@ -2913,7 +2950,7 @@ export interface FilterLogEventsRequest { /** *

    Filters the results to only logs from the log streams in this list.

    - *

    If you specify a value for both logStreamNamePrefix and logStreamNames, the action + *

    If you specify a value for both logStreamNames and logStreamNamePrefix, the action * returns an InvalidParameterException error.

    * @public */ @@ -2921,9 +2958,8 @@ export interface FilterLogEventsRequest { /** *

    Filters the results to include only events from log streams that have names starting with this prefix.

    - *

    If you specify a value for both logStreamNamePrefix and logStreamNames, but the value for - * logStreamNamePrefix does not match any log stream names specified in logStreamNames, the action - * returns an InvalidParameterException error.

    + *

    If you specify a value for both logStreamNamePrefix and logStreamNames, the action + * returns an InvalidParameterException error.

    * @public */ logStreamNamePrefix?: string; @@ -4008,7 +4044,7 @@ export interface PutAccountPolicyRequest { * *
  • *

    - * DistributionThe method used to distribute log data to the destination. + * Distribution The method used to distribute log data to the destination. * By default, log data is * grouped by log stream, but the grouping can be set to Random for a more even distribution. * This property is only applicable when the destination is an Kinesis Data Streams data stream.

    @@ -4222,11 +4258,15 @@ export interface PutDeliverySourceRequest { *

    Defines the type of log that the source is sending.

    *
      *
    • + *

      For Amazon Bedrock, the valid value is + * APPLICATION_LOGS.

      + *
    • + *
    • *

      For Amazon CodeWhisperer, the valid value is * EVENT_LOGS.

      *
    • *
    • - *

      For IAM Identity Centerr, the valid value is + *

      For IAM Identity Center, the valid value is * ERROR_LOGS.

      *
    • *
    • @@ -4371,6 +4411,24 @@ export interface PutLogEventsRequest { * @public */ sequenceToken?: string; + + /** + *

      Reserved for future use.

      + * @public + */ + entity?: Entity; +} + +/** + *

      Reserved for future use.

      + * @public + */ +export interface RejectedEntityInfo { + /** + *

      Reserved for future use.

      + * @public + */ + errorType: EntityRejectionErrorType | undefined; } /** @@ -4421,6 +4479,12 @@ export interface PutLogEventsResponse { * @public */ rejectedLogEventsInfo?: RejectedLogEventsInfo; + + /** + *

      Reserved for future use.

      + * @public + */ + rejectedEntityInfo?: RejectedEntityInfo; } /** diff --git a/clients/client-cloudwatch-logs/src/protocols/Aws_json1_1.ts b/clients/client-cloudwatch-logs/src/protocols/Aws_json1_1.ts index 5f4177ad848f..89d211110b86 100644 --- a/clients/client-cloudwatch-logs/src/protocols/Aws_json1_1.ts +++ b/clients/client-cloudwatch-logs/src/protocols/Aws_json1_1.ts @@ -238,6 +238,7 @@ import { DescribeResourcePoliciesRequest, DescribeSubscriptionFiltersRequest, DisassociateKmsKeyRequest, + Entity, FilterLogEventsRequest, GetDataProtectionPolicyRequest, GetDeliveryDestinationPolicyRequest, @@ -3173,6 +3174,12 @@ const de_SessionTimeoutExceptionRes = async ( // se_DisassociateKmsKeyRequest omitted. +// se_Entity omitted. + +// se_EntityAttributes omitted. + +// se_EntityKeyAttributes omitted. + // se_FilterLogEventsRequest omitted. // se_GetDataProtectionPolicyRequest omitted. @@ -3613,6 +3620,8 @@ const de_QueryStatistics = (output: any, context: __SerdeContext): QueryStatisti }) as any; }; +// de_RejectedEntityInfo omitted. + // de_RejectedLogEventsInfo omitted. // de_ResourceAlreadyExistsException omitted. diff --git a/codegen/sdk-codegen/aws-models/cloudwatch-logs.json b/codegen/sdk-codegen/aws-models/cloudwatch-logs.json index ec025b725892..59e08ef2a1ed 100644 --- a/codegen/sdk-codegen/aws-models/cloudwatch-logs.json +++ b/codegen/sdk-codegen/aws-models/cloudwatch-logs.json @@ -3057,6 +3057,139 @@ } } }, + "com.amazonaws.cloudwatchlogs#Entity": { + "type": "structure", + "members": { + "keyAttributes": { + "target": "com.amazonaws.cloudwatchlogs#EntityKeyAttributes", + "traits": { + "smithy.api#documentation": "

      Reserved for future use.

      " + } + }, + "attributes": { + "target": "com.amazonaws.cloudwatchlogs#EntityAttributes", + "traits": { + "smithy.api#documentation": "

      Reserved for future use.

      " + } + } + }, + "traits": { + "smithy.api#documentation": "

      Reserved for future use.

      " + } + }, + "com.amazonaws.cloudwatchlogs#EntityAttributes": { + "type": "map", + "key": { + "target": "com.amazonaws.cloudwatchlogs#EntityAttributesKey" + }, + "value": { + "target": "com.amazonaws.cloudwatchlogs#EntityAttributesValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.cloudwatchlogs#EntityAttributesKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.cloudwatchlogs#EntityAttributesValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.cloudwatchlogs#EntityKeyAttributes": { + "type": "map", + "key": { + "target": "com.amazonaws.cloudwatchlogs#EntityKeyAttributesKey" + }, + "value": { + "target": "com.amazonaws.cloudwatchlogs#EntityKeyAttributesValue" + }, + "traits": { + "smithy.api#length": { + "min": 2, + "max": 3 + } + } + }, + "com.amazonaws.cloudwatchlogs#EntityKeyAttributesKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32 + } + } + }, + "com.amazonaws.cloudwatchlogs#EntityKeyAttributesValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.cloudwatchlogs#EntityRejectionErrorType": { + "type": "enum", + "members": { + "INVALID_ENTITY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InvalidEntity" + } + }, + "INVALID_TYPE_VALUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InvalidTypeValue" + } + }, + "INVALID_KEY_ATTRIBUTE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InvalidKeyAttributes" + } + }, + "INVALID_ATTRIBUTES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InvalidAttributes" + } + }, + "ENTITY_SIZE_TOO_LARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EntitySizeTooLarge" + } + }, + "UNSUPPORTED_LOG_GROUP_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UnsupportedLogGroupType" + } + }, + "MISSING_REQUIRED_FIELDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MissingRequiredFields" + } + } + } + }, "com.amazonaws.cloudwatchlogs#Enumerations": { "type": "map", "key": { @@ -3385,13 +3518,13 @@ "logStreamNames": { "target": "com.amazonaws.cloudwatchlogs#InputLogStreamNames", "traits": { - "smithy.api#documentation": "

      Filters the results to only logs from the log streams in this list.

      \n

      If you specify a value for both logStreamNamePrefix and logStreamNames, the action\n returns an InvalidParameterException error.

      " + "smithy.api#documentation": "

      Filters the results to only logs from the log streams in this list.

      \n

      If you specify a value for both logStreamNames and logStreamNamePrefix, the action\n returns an InvalidParameterException error.

      " } }, "logStreamNamePrefix": { "target": "com.amazonaws.cloudwatchlogs#LogStreamName", "traits": { - "smithy.api#documentation": "

      Filters the results to include only events from log streams that have names starting with this prefix.

      \n

      If you specify a value for both logStreamNamePrefix and logStreamNames, but the value for\n logStreamNamePrefix does not match any log stream names specified in logStreamNames, the action\n returns an InvalidParameterException error.

      " + "smithy.api#documentation": "

      Filters the results to include only events from log streams that have names starting with this prefix.

      \n

      If you specify a value for both logStreamNamePrefix and logStreamNames, the action\n returns an InvalidParameterException error.

      " } }, "startTime": { @@ -6744,7 +6877,7 @@ } }, "traits": { - "smithy.api#documentation": "

      A tructures that contains information about one pattern token related to \n an anomaly.

      \n

      For more information about patterns and tokens, see CreateLogAnomalyDetector.\n

      " + "smithy.api#documentation": "

      A structure that contains information about one pattern token related to \n an anomaly.

      \n

      For more information about patterns and tokens, see CreateLogAnomalyDetector.\n

      " } }, "com.amazonaws.cloudwatchlogs#PatternTokens": { @@ -6837,7 +6970,7 @@ } ], "traits": { - "smithy.api#documentation": "

      Creates an account-level data protection policy or subscription filter policy that applies to all log groups \n or a subset of log groups in the account.

      \n

      \n Data protection policy\n

      \n

      A data protection policy can help safeguard sensitive \n data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only\n one account-level data protection policy.

      \n \n

      Sensitive data is detected and masked when it is ingested into a log group. When you set a \n data protection policy, log events ingested into the log groups before that time are not masked.

      \n
      \n

      If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups\n and all log groups that are created later in this account. The account-level policy is applied to existing log groups\n with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

      \n

      By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.\n A user who has the logs:Unmask permission can use a \n GetLogEvents or \n FilterLogEvents\n operation with the unmask parameter set to true to view the unmasked \n log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs\n console by running a CloudWatch Logs Insights query with the unmask query command.

      \n

      For more information, including a list of types of data that can be audited and masked, see\n Protect sensitive log data with masking.

      \n

      To use the PutAccountPolicy operation for a data protection policy, you must be signed on with \n the logs:PutDataProtectionPolicy\n and logs:PutAccountPolicy permissions.

      \n

      The PutAccountPolicy operation applies to all log groups in the account. You can use \n PutDataProtectionPolicy\n to create a data protection policy that applies to just one log group. \n If a log group has its own data protection policy and \n the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.

      \n

      \n Subscription filter policy\n

      \n

      A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.\n Account-level subscription filter policies apply to both existing log groups and log groups that are created later in \n this account. Supported destinations are Kinesis Data Streams, Firehose, and \n Lambda. When log events are sent to the receiving service, they are Base64 encoded and \n compressed with the GZIP format.

      \n

      The following destinations are supported for subscription filters:

      \n
        \n
      • \n

        An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.

        \n
      • \n
      • \n

        An Firehose data stream in the same account as the subscription policy, for same-account delivery.

        \n
      • \n
      • \n

        A Lambda function in the same account as the subscription policy, for same-account delivery.

        \n
      • \n
      • \n

        A logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Firehose are supported as logical destinations.

        \n
      • \n
      \n

      Each account can have one account-level subscription filter policy. \n If you are updating an existing filter, you must specify the correct name in PolicyName.\n To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda \n function, you must also have the iam:PassRole permission.

      " + "smithy.api#documentation": "

      Creates an account-level data protection policy or subscription filter policy that applies to all log groups \n or a subset of log groups in the account.

      \n

      \n Data protection policy\n

      \n

      A data protection policy can help safeguard sensitive \n data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only\n one account-level data protection policy.

      \n \n

      Sensitive data is detected and masked when it is ingested into a log group. When you set a \n data protection policy, log events ingested into the log groups before that time are not masked.

      \n
      \n

      If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups\n and all log groups that are created later in this account. The account-level policy is applied to existing log groups\n with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

      \n

      By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.\n A user who has the logs:Unmask permission can use a \n GetLogEvents or \n FilterLogEvents\n operation with the unmask parameter set to true to view the unmasked \n log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs\n console by running a CloudWatch Logs Insights query with the unmask query command.

      \n

      For more information, including a list of types of data that can be audited and masked, see\n Protect sensitive log data with masking.

      \n

      To use the PutAccountPolicy operation for a data protection policy, you must be signed on with \n the logs:PutDataProtectionPolicy\n and logs:PutAccountPolicy permissions.

      \n

      The PutAccountPolicy operation applies to all log groups in the account. You can use \n PutDataProtectionPolicy\n to create a data protection policy that applies to just one log group. \n If a log group has its own data protection policy and \n the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.

      \n

      \n Subscription filter policy\n

      \n

      A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.\n Account-level subscription filter policies apply to both existing log groups and log groups that are created later in \n this account. Supported destinations are Kinesis Data Streams, Firehose, and \n Lambda. When log events are sent to the receiving service, they are Base64 encoded and \n compressed with the GZIP format.

      \n

      The following destinations are supported for subscription filters:

      \n
        \n
      • \n

        An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.

        \n
      • \n
      • \n

        An Firehose data stream in the same account as the subscription policy, for same-account delivery.

        \n
      • \n
      • \n

        A Lambda function in the same account as the subscription policy, for same-account delivery.

        \n
      • \n
      • \n

        A logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Firehose are supported as logical destinations.

        \n
      • \n
      \n

      Each account can have one account-level subscription filter policy per Region. \n If you are updating an existing filter, you must specify the correct name in PolicyName.\n To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda \n function, you must also have the iam:PassRole permission.

      " } }, "com.amazonaws.cloudwatchlogs#PutAccountPolicyRequest": { @@ -6853,7 +6986,7 @@ "policyDocument": { "target": "com.amazonaws.cloudwatchlogs#AccountPolicyDocument", "traits": { - "smithy.api#documentation": "

      Specify the policy, in JSON.

      \n

      \n Data protection policy\n

      \n

      A data protection policy must include two JSON blocks:

      \n
        \n
      • \n

        The first block must include both a DataIdentifer array and an \n Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that\n you want to mask. For more information about the available options, see \n Types of data that you can mask.

        \n

        The Operation property with an Audit action is required to find the \n sensitive data terms. This Audit action must contain a FindingsDestination\n object. You can optionally use that FindingsDestination object to list one or more \n destinations to send audit findings to. If you specify destinations such as log groups, \n Firehose streams, and S3 buckets, they must already exist.

        \n
      • \n
      • \n

        The second block must include both a DataIdentifer array and an\n Operation property with an Deidentify action. The\n DataIdentifer array must exactly match the DataIdentifer array\n in the first block of the policy.

        \n

        The Operation property with the Deidentify action is what actually masks the \n data, and it must \n contain the \n \"MaskConfig\": {} object. The \n \"MaskConfig\": {} object must be empty.

        \n
      • \n
      \n

      For an example data protection policy, see the Examples section on this page.

      \n \n

      The contents of the two DataIdentifer arrays must match exactly.

      \n
      \n

      In addition to the two JSON blocks, the policyDocument can also include Name,\n Description, and Version fields. The Name is different than the \n operation's policyName parameter, and is used as a dimension when\n CloudWatch Logs reports audit findings metrics to CloudWatch.

      \n

      The JSON specified in policyDocument can be up to 30,720 characters long.

      \n

      \n Subscription filter policy\n

      \n

      A subscription filter policy can include the following attributes in a JSON block:

      \n
        \n
      • \n

        \n DestinationArn The ARN of the destination\n to deliver log events to. Supported destinations are:

        \n
          \n
        • \n

          An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.

          \n
        • \n
        • \n

          An Firehose data stream in the same account as the subscription policy, for same-account delivery.

          \n
        • \n
        • \n

          A Lambda function in the same account as the subscription policy, for same-account delivery.

          \n
        • \n
        • \n

          A logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Firehose are supported as logical destinations.

          \n
        • \n
        \n
      • \n
      • \n

        \n RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log\n events to the destination stream. You don't need to provide the ARN when you are working with\n a logical destination for cross-account delivery.

        \n
      • \n
      • \n

        \n FilterPattern A filter pattern for subscribing to a \n filtered stream of log events.

        \n
      • \n
      • \n

        \n DistributionThe method used to distribute log data to the destination. \n By default, log data is\n grouped by log stream, but the grouping can be set to Random for a more even distribution.\n This property is only applicable when the destination is an Kinesis Data Streams data stream.

        \n
      • \n
      ", + "smithy.api#documentation": "

      Specify the policy, in JSON.

      \n

      \n Data protection policy\n

      \n

      A data protection policy must include two JSON blocks:

      \n
        \n
      • \n

        The first block must include both a DataIdentifer array and an \n Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that\n you want to mask. For more information about the available options, see \n Types of data that you can mask.

        \n

        The Operation property with an Audit action is required to find the \n sensitive data terms. This Audit action must contain a FindingsDestination\n object. You can optionally use that FindingsDestination object to list one or more \n destinations to send audit findings to. If you specify destinations such as log groups, \n Firehose streams, and S3 buckets, they must already exist.

        \n
      • \n
      • \n

        The second block must include both a DataIdentifer array and an\n Operation property with an Deidentify action. The\n DataIdentifer array must exactly match the DataIdentifer array\n in the first block of the policy.

        \n

        The Operation property with the Deidentify action is what actually masks the \n data, and it must \n contain the \n \"MaskConfig\": {} object. The \n \"MaskConfig\": {} object must be empty.

        \n
      • \n
      \n

      For an example data protection policy, see the Examples section on this page.

      \n \n

      The contents of the two DataIdentifer arrays must match exactly.

      \n
      \n

      In addition to the two JSON blocks, the policyDocument can also include Name,\n Description, and Version fields. The Name is different than the \n operation's policyName parameter, and is used as a dimension when\n CloudWatch Logs reports audit findings metrics to CloudWatch.

      \n

      The JSON specified in policyDocument can be up to 30,720 characters long.

      \n

      \n Subscription filter policy\n

      \n

      A subscription filter policy can include the following attributes in a JSON block:

      \n
        \n
      • \n

        \n DestinationArn The ARN of the destination\n to deliver log events to. Supported destinations are:

        \n
          \n
        • \n

          An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.

          \n
        • \n
        • \n

          An Firehose data stream in the same account as the subscription policy, for same-account delivery.

          \n
        • \n
        • \n

          A Lambda function in the same account as the subscription policy, for same-account delivery.

          \n
        • \n
        • \n

          A logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Firehose are supported as logical destinations.

          \n
        • \n
        \n
      • \n
      • \n

        \n RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log\n events to the destination stream. You don't need to provide the ARN when you are working with\n a logical destination for cross-account delivery.

        \n
      • \n
      • \n

        \n FilterPattern A filter pattern for subscribing to a \n filtered stream of log events.

        \n
      • \n
      • \n

        \n Distribution The method used to distribute log data to the destination. \n By default, log data is\n grouped by log stream, but the grouping can be set to Random for a more even distribution.\n This property is only applicable when the destination is an Kinesis Data Streams data stream.

        \n
      • \n
      ", "smithy.api#required": {} } }, @@ -7166,7 +7299,7 @@ "logType": { "target": "com.amazonaws.cloudwatchlogs#LogType", "traits": { - "smithy.api#documentation": "

      Defines the type of log that the source is sending.

      \n
        \n
      • \n

        For Amazon CodeWhisperer, the valid value is \n EVENT_LOGS.

        \n
      • \n
      • \n

        For IAM Identity Centerr, the valid value is \n ERROR_LOGS.

        \n
      • \n
      • \n

        For Amazon WorkMail, the valid values are \n ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS.

        \n
      • \n
      ", + "smithy.api#documentation": "

      Defines the type of log that the source is sending.

      \n
        \n
      • \n

        For Amazon Bedrock, the valid value is \n APPLICATION_LOGS.

        \n
      • \n
      • \n

        For Amazon CodeWhisperer, the valid value is \n EVENT_LOGS.

        \n
      • \n
      • \n

        For IAM Identity Center, the valid value is \n ERROR_LOGS.

        \n
      • \n
      • \n

        For Amazon WorkMail, the valid values are \n ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS.

        \n
      • \n
      ", "smithy.api#required": {} } }, @@ -7379,6 +7512,12 @@ "traits": { "smithy.api#documentation": "

      The sequence token obtained from the response of the previous PutLogEvents\n call.

      \n \n

      The sequenceToken parameter is now ignored in PutLogEvents\n actions. PutLogEvents\n actions are now accepted and never return InvalidSequenceTokenException or\n DataAlreadyAcceptedException even if the sequence token is not valid.

      \n
      " } + }, + "entity": { + "target": "com.amazonaws.cloudwatchlogs#Entity", + "traits": { + "smithy.api#documentation": "

      Reserved for future use.

      " + } } }, "traits": { @@ -7399,6 +7538,12 @@ "traits": { "smithy.api#documentation": "

      The rejected events.

      " } + }, + "rejectedEntityInfo": { + "target": "com.amazonaws.cloudwatchlogs#RejectedEntityInfo", + "traits": { + "smithy.api#documentation": "

      Reserved for future use.

      " + } } }, "traits": { @@ -7431,7 +7576,7 @@ } ], "traits": { - "smithy.api#documentation": "

      Creates or updates a metric filter and associates it with the specified log group. With\n metric filters, you can configure rules to extract metric data from log events ingested\n through PutLogEvents.

      \n

      The maximum number of metric filters that can be associated with a log group is\n 100.

      \n

      When you create a metric filter, you can also optionally assign a unit and dimensions\n to the metric that is created.

      \n \n

      Metrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress or requestID as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n

      \n

      CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for\n your specified dimensions within one hour.

      \n

      You can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.\n

      \n
      " + "smithy.api#documentation": "

      Creates or updates a metric filter and associates it with the specified log group. With\n metric filters, you can configure rules to extract metric data from log events ingested\n through PutLogEvents.

      \n

      The maximum number of metric filters that can be associated with a log group is\n 100.

      \n

      Using regular expressions to create metric filters is supported. For these filters, \n there is a quota of two regular expression patterns within a single filter pattern. There\n is also a quota of five regular expression patterns per log group.\n For more information about using regular expressions in metric filters, \n see \n Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

      \n

      When you create a metric filter, you can also optionally assign a unit and dimensions\n to the metric that is created.

      \n \n

      Metrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress or requestID as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n

      \n

      CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for\n your specified dimensions within one hour.

      \n

      You can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.\n

      \n
      " } }, "com.amazonaws.cloudwatchlogs#PutMetricFilterRequest": { @@ -7681,7 +7826,7 @@ } ], "traits": { - "smithy.api#documentation": "

      Creates or updates a subscription filter and associates it with the specified log\n group. With subscription filters, you can subscribe to a real-time stream of log events\n ingested through PutLogEvents\n and have them delivered to a specific destination. When log events are sent to the receiving\n service, they are Base64 encoded and compressed with the GZIP format.

      \n

      The following destinations are supported for subscription filters:

      \n
        \n
      • \n

        An Amazon Kinesis data stream belonging to the same account as the subscription\n filter, for same-account delivery.

        \n
      • \n
      • \n

        A logical destination created with PutDestination that belongs to a different account, for cross-account delivery.\n We currently support Kinesis Data Streams and Firehose as logical destinations.

        \n
      • \n
      • \n

        An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as\n the subscription filter, for same-account delivery.

        \n
      • \n
      • \n

        An Lambda function that belongs to the same account as the\n subscription filter, for same-account delivery.

        \n
      • \n
      \n

      Each log group can have up to two subscription filters associated with it. If you are\n updating an existing filter, you must specify the correct name in filterName.\n

      \n

      To perform a PutSubscriptionFilter operation for any destination except a Lambda function, \n you must also have the \n iam:PassRole permission.

      " + "smithy.api#documentation": "

      Creates or updates a subscription filter and associates it with the specified log\n group. With subscription filters, you can subscribe to a real-time stream of log events\n ingested through PutLogEvents\n and have them delivered to a specific destination. When log events are sent to the receiving\n service, they are Base64 encoded and compressed with the GZIP format.

      \n

      The following destinations are supported for subscription filters:

      \n
        \n
      • \n

        An Amazon Kinesis data stream belonging to the same account as the subscription\n filter, for same-account delivery.

        \n
      • \n
      • \n

        A logical destination created with PutDestination that belongs to a different account, for cross-account delivery.\n We currently support Kinesis Data Streams and Firehose as logical destinations.

        \n
      • \n
      • \n

        An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as\n the subscription filter, for same-account delivery.

        \n
      • \n
      • \n

        An Lambda function that belongs to the same account as the\n subscription filter, for same-account delivery.

        \n
      • \n
      \n

      Each log group can have up to two subscription filters associated with it. If you are\n updating an existing filter, you must specify the correct name in filterName.\n

      \n

      Using regular expressions to create subscription filters is supported. For these filters, \n there is a quota of two regular expression patterns within a single filter pattern. There\n is also a quota of five regular expression patterns per log group.\n For more information about using regular expressions in subscription filters, \n see \n Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.

      \n

      To perform a PutSubscriptionFilter operation for any destination except a Lambda function, \n you must also have the \n iam:PassRole permission.

      " } }, "com.amazonaws.cloudwatchlogs#PutSubscriptionFilterRequest": { @@ -7990,6 +8135,21 @@ } } }, + "com.amazonaws.cloudwatchlogs#RejectedEntityInfo": { + "type": "structure", + "members": { + "errorType": { + "target": "com.amazonaws.cloudwatchlogs#EntityRejectionErrorType", + "traits": { + "smithy.api#documentation": "

      Reserved for future use.

      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

      Reserved for future use.

      " + } + }, "com.amazonaws.cloudwatchlogs#RejectedLogEventsInfo": { "type": "structure", "members": { @@ -8442,7 +8602,7 @@ } ], "traits": { - "smithy.api#documentation": "

      Starts a Live Tail streaming session for one or more log groups. A Live Tail session returns a stream of \n log events that have\n been recently ingested in the log groups. For more information, see \n Use Live Tail to view logs in near real time.\n

      \n

      The response to this operation is a response stream, over which \n the server sends live log events and the client receives them.

      \n

      The following objects are sent over the stream:

      \n
        \n
      • \n

        A single LiveTailSessionStart \n object is sent at the start of the session.

        \n
      • \n
      • \n

        Every second, a LiveTailSessionUpdate\n object is sent. Each of these objects contains an array of the actual log events.

        \n

        If no new log events were ingested in the past second, the \n LiveTailSessionUpdate object will contain an empty array.

        \n

        The array of log events contained in a LiveTailSessionUpdate can include\n as many as 500 log events. If the number of log events matching the request exceeds 500 per second, the\n log events are sampled down to 500 log events to be included in each LiveTailSessionUpdate object.

        \n

        If your client consumes the log events slower than the server produces them, CloudWatch Logs\n buffers up to 10 LiveTailSessionUpdate events or 5000 log events, after \n which it starts dropping the oldest events.

        \n
      • \n
      • \n

        A SessionStreamingException \n object is returned if an unknown error occurs on the server side.

        \n
      • \n
      • \n

        A SessionTimeoutException \n object is returned when the session times out, after it has been kept open for three hours.

        \n
      • \n
      \n \n

      You can end a session before it times out by closing the session stream or by closing the client that is receiving the \n stream. The session also ends if the established connection between the client and the server breaks.

      \n
      \n

      For examples of using an SDK to start a Live Tail session, see \n \n Start a Live Tail session using an Amazon Web Services SDK.

      ", + "smithy.api#documentation": "

      Starts a Live Tail streaming session for one or more log groups. A Live Tail session returns a stream of \n log events that have\n been recently ingested in the log groups. For more information, see \n Use Live Tail to view logs in near real time.\n

      \n

      The response to this operation is a response stream, over which \n the server sends live log events and the client receives them.

      \n

      The following objects are sent over the stream:

      \n
        \n
      • \n

        A single LiveTailSessionStart \n object is sent at the start of the session.

        \n
      • \n
      • \n

        Every second, a LiveTailSessionUpdate\n object is sent. Each of these objects contains an array of the actual log events.

        \n

        If no new log events were ingested in the past second, the \n LiveTailSessionUpdate object will contain an empty array.

        \n

        The array of log events contained in a LiveTailSessionUpdate can include\n as many as 500 log events. If the number of log events matching the request exceeds 500 per second, the\n log events are sampled down to 500 log events to be included in each LiveTailSessionUpdate object.

        \n

        If your client consumes the log events slower than the server produces them, CloudWatch Logs\n buffers up to 10 LiveTailSessionUpdate events or 5000 log events, after \n which it starts dropping the oldest events.

        \n
      • \n
      • \n

        A SessionStreamingException \n object is returned if an unknown error occurs on the server side.

        \n
      • \n
      • \n

        A SessionTimeoutException \n object is returned when the session times out, after it has been kept open for three hours.

        \n
      • \n
      \n \n

      You can end a session before it times out by closing the session stream or by closing the client that is receiving the \n stream. The session also ends if the established connection between the client and the server breaks.

      \n
      \n

      For examples of using an SDK to start a Live Tail session, see \n \n Start a Live Tail session using an Amazon Web Services SDK.

      ", "smithy.api#endpoint": { "hostPrefix": "streaming-" }