From 3bf5ed50c54615c0e8dfa345234981eb4a96f7c4 Mon Sep 17 00:00:00 2001
From: awstools

---
diff --git a/clients/client-cloudwatch-logs/src/commands/DeleteAccountPolicyCommand.ts b/clients/client-cloudwatch-logs/src/commands/DeleteAccountPolicyCommand.ts
--- a/clients/client-cloudwatch-logs/src/commands/DeleteAccountPolicyCommand.ts
+++ b/clients/client-cloudwatch-logs/src/commands/DeleteAccountPolicyCommand.ts
 /**
  * @public
- * Deletes a CloudWatch Logs account policy.
- * To use this operation, you must be signed on with the logs:DeleteDataProtectionPolicy and
- * logs:DeleteAccountPolicy permissions.
+ * Deletes a CloudWatch Logs account policy. This stops the policy from applying to all log groups
+ * or a subset of log groups in the account. Log-group level policies will still be in effect.
+ * To use this operation, you must be signed on with the correct permissions depending on the type of policy
+ * that you are deleting.
+ *   - To delete a data protection policy, you must have the logs:DeleteDataProtectionPolicy and
+ *     logs:DeleteAccountPolicy permissions.
+ *   - To delete a subscription filter policy, you must have the logs:DeleteSubscriptionFilter and
+ *     logs:DeleteAccountPolicy permissions.
+ *
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
@@ -39,7 +50,7 @@ export interface DeleteAccountPolicyCommandOutput extends __MetadataBearer {}
* const client = new CloudWatchLogsClient(config);
* const input = { // DeleteAccountPolicyRequest
* policyName: "STRING_VALUE", // required
- * policyType: "DATA_PROTECTION_POLICY", // required
+ * policyType: "DATA_PROTECTION_POLICY" || "SUBSCRIPTION_FILTER_POLICY", // required
* };
* const command = new DeleteAccountPolicyCommand(input);
* const response = await client.send(command);
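For reference, a minimal TypeScript sketch of the updated call, assuming the new SUBSCRIPTION_FILTER_POLICY value; the policy name is a placeholder and the request shape follows the generated example above.

```ts
import {
  CloudWatchLogsClient,
  DeleteAccountPolicyCommand,
} from "@aws-sdk/client-cloudwatch-logs";

const client = new CloudWatchLogsClient({ region: "us-east-1" });

// Delete the account-level subscription filter policy.
// "ACCOUNT_SUBSCRIPTION_FILTER" is a placeholder policy name.
await client.send(
  new DeleteAccountPolicyCommand({
    policyName: "ACCOUNT_SUBSCRIPTION_FILTER",
    policyType: "SUBSCRIPTION_FILTER_POLICY",
  })
);
```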
diff --git a/clients/client-cloudwatch-logs/src/commands/DescribeAccountPoliciesCommand.ts b/clients/client-cloudwatch-logs/src/commands/DescribeAccountPoliciesCommand.ts
index 8aff2b3a7fea9..25ec479d5f991 100644
--- a/clients/client-cloudwatch-logs/src/commands/DescribeAccountPoliciesCommand.ts
+++ b/clients/client-cloudwatch-logs/src/commands/DescribeAccountPoliciesCommand.ts
@@ -36,7 +36,7 @@ export interface DescribeAccountPoliciesCommandOutput extends DescribeAccountPol
* // const { CloudWatchLogsClient, DescribeAccountPoliciesCommand } = require("@aws-sdk/client-cloudwatch-logs"); // CommonJS import
* const client = new CloudWatchLogsClient(config);
* const input = { // DescribeAccountPoliciesRequest
- * policyType: "DATA_PROTECTION_POLICY", // required
+ * policyType: "DATA_PROTECTION_POLICY" || "SUBSCRIPTION_FILTER_POLICY", // required
* policyName: "STRING_VALUE",
* accountIdentifiers: [ // AccountIds
* "STRING_VALUE",
@@ -50,8 +50,9 @@ export interface DescribeAccountPoliciesCommandOutput extends DescribeAccountPol
* // policyName: "STRING_VALUE",
* // policyDocument: "STRING_VALUE",
* // lastUpdatedTime: Number("long"),
- * // policyType: "DATA_PROTECTION_POLICY",
+ * // policyType: "DATA_PROTECTION_POLICY" || "SUBSCRIPTION_FILTER_POLICY",
* // scope: "ALL",
+ * // selectionCriteria: "STRING_VALUE",
* // accountId: "STRING_VALUE",
* // },
* // ],
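A small sketch of reading the updated response, assuming at least one account-level subscription filter policy exists; the field names follow the generated example above.

```ts
import {
  CloudWatchLogsClient,
  DescribeAccountPoliciesCommand,
} from "@aws-sdk/client-cloudwatch-logs";

const client = new CloudWatchLogsClient({ region: "us-east-1" });

// List account-level subscription filter policies and print the new
// selectionCriteria field when it is present.
const { accountPolicies } = await client.send(
  new DescribeAccountPoliciesCommand({ policyType: "SUBSCRIPTION_FILTER_POLICY" })
);
for (const policy of accountPolicies ?? []) {
  console.log(policy.policyName, policy.scope, policy.selectionCriteria ?? "(no selection criteria)");
}
```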
diff --git a/clients/client-cloudwatch-logs/src/commands/PutAccountPolicyCommand.ts b/clients/client-cloudwatch-logs/src/commands/PutAccountPolicyCommand.ts
index 1991ab4412bb2..16b86d2c88ddc 100644
--- a/clients/client-cloudwatch-logs/src/commands/PutAccountPolicyCommand.ts
+++ b/clients/client-cloudwatch-logs/src/commands/PutAccountPolicyCommand.ts
@@ -28,15 +28,20 @@ export interface PutAccountPolicyCommandOutput extends PutAccountPolicyResponse,
 /**
  * @public
- * Creates an account-level data protection policy that applies to all log groups in the account. A data protection policy can help safeguard sensitive
+ * Creates an account-level data protection policy or subscription filter policy that applies to all log groups
+ * or a subset of log groups in the account.
+ *
+ * Data protection policy
+ *
+ * A data protection policy can help safeguard sensitive
  * data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only
- * one account-level policy.
+ * one account-level data protection policy.
  * Sensitive data is detected and masked when it is ingested into a log group. When you set a
  * data protection policy, log events ingested into the log groups before that time are not masked.
  * If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups
- * and all log groups that are created later in this account. The account policy is applied to existing log groups
+ * and all log groups that are created later in this account. The account-level policy is applied to existing log groups
  * with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
  * By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.
  * A user who has the logs:Unmask permission can use a
@@ -47,14 +52,43 @@ export interface PutAccountPolicyCommandOutput extends PutAccountPolicyResponse,
  * console by running a CloudWatch Logs Insights query with the unmask query command.
  * For more information, including a list of types of data that can be audited and masked, see
  * Protect sensitive log data with masking.
- * To use the PutAccountPolicy operation, you must be signed on with the logs:PutDataProtectionPolicy
+ * To use the PutAccountPolicy operation for a data protection policy, you must be signed on with
+ * the logs:PutDataProtectionPolicy
  * and logs:PutAccountPolicy permissions.
- * The PutAccountPolicy operation applies to all log groups in the account. You can also use
+ *
+ * The PutAccountPolicy operation applies to all log groups in the account. You can use
  * PutDataProtectionPolicy
  * to create a data protection policy that applies to just one log group.
  * If a log group has its own data protection policy and
  * the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term
  * specified in either policy is masked.
+ *
+ * Subscription filter policy
+ *
+ * A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.
+ * Account-level subscription filter policies apply to both existing log groups and log groups that are created later in
+ * this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and
+ * Lambda. When log events are sent to the receiving service, they are Base64 encoded and
+ * compressed with the GZIP format.
+ * The following destinations are supported for subscription filters:
+ *   - A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
+ *   - A Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
+ *   - A Lambda function in the same account as the subscription policy, for same-account delivery.
+ *   - A logical destination in a different account created with PutDestination, for cross-account
+ *     delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
+ * Each account can have one account-level subscription filter policy.
+ * If you are updating an existing filter, you must specify the correct name in PolicyName.
+ * To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda
+ * function, you must also have the iam:PassRole permission.
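A hedged sketch of creating an account-level subscription filter policy with the attributes documented in this hunk; the ARNs, policy name, and excluded log group are placeholders.

```ts
import {
  CloudWatchLogsClient,
  PutAccountPolicyCommand,
} from "@aws-sdk/client-cloudwatch-logs";

const client = new CloudWatchLogsClient({ region: "us-east-1" });

// Account-level subscription filter policy. DestinationArn, RoleArn, FilterPattern,
// and Distribution are the policyDocument attributes documented above; values are placeholders.
await client.send(
  new PutAccountPolicyCommand({
    policyName: "ACCOUNT_SUBSCRIPTION_FILTER",
    policyType: "SUBSCRIPTION_FILTER_POLICY",
    policyDocument: JSON.stringify({
      DestinationArn: "arn:aws:kinesis:us-east-1:111122223333:stream/example-stream",
      RoleArn: "arn:aws:iam::111122223333:role/example-cwl-to-kinesis-role",
      FilterPattern: "ERROR",
      Distribution: "Random",
    }),
    // Exclude a log group from the account-wide filter to help prevent
    // log recursion (see the selectionCriteria documentation below).
    selectionCriteria: 'LogGroupName NOT IN ["example-excluded-log-group"]',
  })
);
```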
diff --git a/clients/client-cloudwatch-logs/src/commands/StartLiveTailCommand.ts b/clients/client-cloudwatch-logs/src/commands/StartLiveTailCommand.ts
--- a/clients/client-cloudwatch-logs/src/commands/StartLiveTailCommand.ts
+++ b/clients/client-cloudwatch-logs/src/commands/StartLiveTailCommand.ts
  * You can end a session before it times out by closing the session stream or by closing the client that is receiving the
  * stream. The session also ends if the established connection between the client and the server breaks.
+ *
+ * For examples of using an SDK to start a Live Tail session, see
+ * Start a Live Tail session using an Amazon Web Services SDK.
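A sketch of consuming a Live Tail session with this client; the log group ARN is a placeholder, and the responseStream / sessionStart / sessionUpdate / sessionResults member names are taken from the Live Tail documentation above and should be checked against the generated types.

```ts
import {
  CloudWatchLogsClient,
  StartLiveTailCommand,
} from "@aws-sdk/client-cloudwatch-logs";

const client = new CloudWatchLogsClient({ region: "us-east-1" });

// Tail one log group and print events as they arrive. Closing the client
// ends the session before the three-hour timeout.
const { responseStream } = await client.send(
  new StartLiveTailCommand({
    logGroupIdentifiers: ["arn:aws:logs:us-east-1:111122223333:log-group:example-log-group"],
    logStreamNamePrefixes: ["app-"],
  })
);
for await (const event of responseStream ?? []) {
  if (event.sessionStart) {
    console.log("Live Tail session started");
  } else if (event.sessionUpdate) {
    for (const logEvent of event.sessionUpdate.sessionResults ?? []) {
      console.log(logEvent.timestamp, logEvent.message);
    }
  }
}
```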
  * @example
  * Use a bare-bones client and the command you need to make an API call.
  * ```javascript
diff --git a/clients/client-cloudwatch-logs/src/models/models_0.ts b/clients/client-cloudwatch-logs/src/models/models_0.ts
index cd784ed23472b..670bba90aa363 100644
--- a/clients/client-cloudwatch-logs/src/models/models_0.ts
+++ b/clients/client-cloudwatch-logs/src/models/models_0.ts
@@ -29,6 +29,7 @@ export class AccessDeniedException extends __BaseException {
  */
 export const PolicyType = {
   DATA_PROTECTION_POLICY: "DATA_PROTECTION_POLICY",
+  SUBSCRIPTION_FILTER_POLICY: "SUBSCRIPTION_FILTER_POLICY",
 } as const;

 /**
@@ -85,6 +86,12 @@ export interface AccountPolicy {
    */
   scope?: Scope;

+  /**
+   * @public
+   * The log group selection criteria for this subscription filter policy.
+   */
+  selectionCriteria?: string;
+
   /**
    * @public
    * The Amazon Web Services account ID that the policy applies to.
@@ -970,7 +977,7 @@ export interface CreateLogGroupRequest {
    * If you omit this parameter, the default of STANDARD is used.
-   * After a log group is created, its class can't be changed.
+   * The value of logGroupClass can't be changed after a log group is created.
    * For details about the features supported by each class, see
    * Log classes
@@ -1053,7 +1060,7 @@ export interface DeleteAccountPolicyRequest {
   /**
    * @public
-   * The type of policy to delete. Currently, the only valid value is DATA_PROTECTION_POLICY.
+   * The type of policy to delete.
    */
   policyType: PolicyType | undefined;
 }
@@ -1431,7 +1438,7 @@ export interface DescribeAccountPoliciesRequest {
   /**
    * @public
    * Use this parameter to limit the returned policies to only the policies that match the policy type that you
-   * specify. Currently, the only valid value is DATA_PROTECTION_POLICY.
+   * specify.
   /**
    * @public
-   * Specify the data protection policy, in JSON.
-   * This policy must include two JSON blocks:
+   * Specify the policy, in JSON.
+   *
+   * Data protection policy
+   *
+   * A data protection policy must include two JSON blocks:
    * The first block must include both a DataIdentifer array and an
@@ -3919,13 +3929,57 @@ export interface PutAccountPolicyRequest {
    * Description, and Version fields. The Name is different than the
    * operation's policyName parameter, and is used as a dimension when
    * CloudWatch Logs reports audit findings metrics to CloudWatch.
-   * The JSON specified in policyDocument can be up to 30,720 characters.
+   * The JSON specified in policyDocument can be up to 30,720 characters long.
+   *
+   * Subscription filter policy
+   *
+   * A subscription filter policy can include the following attributes in a JSON block:
+   *   - DestinationArn The ARN of the destination
+   *     to deliver log events to. Supported destinations are:
+   *       - A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
+   *       - A Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
+   *       - A Lambda function in the same account as the subscription policy, for same-account delivery.
+   *       - A logical destination in a different account created with PutDestination, for cross-account
+   *         delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
+   *   - RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log
+   *     events to the destination stream. You don't need to provide the ARN when you are working with
+   *     a logical destination for cross-account delivery.
+   *   - FilterPattern A filter pattern for subscribing to a
+   *     filtered stream of log events.
+   *   - Distribution The method used to distribute log data to the destination.
+   *     By default, log data is
+   *     grouped by log stream, but the grouping can be set to Random for a more even distribution.
+   *     This property is only applicable when the destination is a Kinesis Data Streams data stream.
    */
   policyDocument: string | undefined;

   /**
    * @public
-   * The type of policy that you're creating or updating. Currently the only valid value for this parameter is DATA_PROTECTION_POLICY.
+   * The type of policy that you're creating or updating.
    */
   policyType: PolicyType | undefined;
@@ -3936,6 +3990,18 @@ export interface PutAccountPolicyRequest {
    * of ALL is used.
    */
   scope?: Scope;
+
+  /**
+   * @public
+   * Use this parameter to apply the subscription filter policy to a subset of log groups in the account.
+   * Currently, the only supported filter is LogGroupName NOT IN []. The selectionCriteria
+   * string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.
+   * Using the selectionCriteria parameter is useful to help prevent infinite loops.
+   * For more information, see Log recursion prevention.
+   * Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY
+   * for policyType.
+   */
+  selectionCriteria?: string;

   /**
    * @public
    * If you specify this parameter, then only log events in the log streams that you specify here are
    * included in the Live Tail session.
+   * If you specify this field, you can't also specify the logStreamNamePrefixes field.
    * You can specify this parameter only if you specify only one log group in logGroupIdentifiers.
    */
   logStreamNames?: string[];

   /**
    * @public
    * If you specify this parameter, then only log events in the log streams that have names that start with the
    * prefixes that you specify here are
    * included in the Live Tail session.
+   * If you specify this field, you can't also specify the logStreamNames field.
    * You can specify this parameter only if you specify only one log group in logGroupIdentifiers.
    */
   logStreamNamePrefixes?: string[];
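The policyDocument documentation above also covers the data protection case; a hedged sketch of such a document, assuming the standard data protection policy syntax (the data identifier and findings destination are placeholders).

```ts
// Sketch of a data protection policyDocument with the two blocks described above:
// an Audit operation and a Deidentify operation over the same data identifiers.
const dataProtectionPolicy = {
  Name: "account-data-protection-policy",
  Version: "2021-06-01",
  Statement: [
    {
      Sid: "audit-policy",
      DataIdentifier: ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
      Operation: {
        Audit: {
          FindingsDestination: {
            CloudWatchLogs: { LogGroup: "example-findings-log-group" },
          },
        },
      },
    },
    {
      Sid: "masking-policy",
      DataIdentifier: ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
      Operation: {
        Deidentify: { MaskConfig: {} },
      },
    },
  ],
};

// Pass it as the policyDocument string of a PutAccountPolicyCommand:
// policyDocument: JSON.stringify(dataProtectionPolicy), policyType: "DATA_PROTECTION_POLICY".
```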
The scope of the account policy.
" } }, + "selectionCriteria": { + "target": "com.amazonaws.cloudwatchlogs#SelectionCriteria", + "traits": { + "smithy.api#documentation": "The log group selection criteria for this subscription filter policy.
" + } + }, "accountId": { "target": "com.amazonaws.cloudwatchlogs#AccountId", "traits": { @@ -873,7 +879,7 @@ "logGroupClass": { "target": "com.amazonaws.cloudwatchlogs#LogGroupClass", "traits": { - "smithy.api#documentation": "Use this parameter to specify the log group class for this log group. There are two classes:
\nThe Standard
log class supports all CloudWatch Logs features.
The Infrequent Access
log class supports a subset of CloudWatch Logs features\n and incurs lower costs.
If you omit this parameter, the default of STANDARD
is used.
After a log group is created, its class can't be changed.
\nFor details about the features supported by each class, see \n Log classes\n
" + "smithy.api#documentation": "Use this parameter to specify the log group class for this log group. There are two classes:
\nThe Standard
log class supports all CloudWatch Logs features.
The Infrequent Access
log class supports a subset of CloudWatch Logs features\n and incurs lower costs.
If you omit this parameter, the default of STANDARD
is used.
The value of logGroupClass
can't be changed after a log group is created.
For details about the features supported by each class, see \n Log classes\n
" } } }, @@ -1008,7 +1014,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a CloudWatch Logs account policy.
\nTo use this operation, you must be signed on with the logs:DeleteDataProtectionPolicy
and \n logs:DeleteAccountPolicy
permissions.
Deletes a CloudWatch Logs account policy. This stops the policy from applying to all log groups\n or a subset of log groups in the account. Log-group level policies will still be in effect.
\nTo use this operation, you must be signed on with the correct permissions depending on the type of policy\n that you are deleting.
\nTo delete a data protection policy, you must have the logs:DeleteDataProtectionPolicy
and \n logs:DeleteAccountPolicy
permissions.
To delete a subscription filter policy, you must have the logs:DeleteSubscriptionFilter
and \n logs:DeleteAccountPolicy
permissions.
The type of policy to delete. Currently, the only valid value is DATA_PROTECTION_POLICY
.
The type of policy to delete.
", "smithy.api#required": {} } } @@ -1916,7 +1922,7 @@ "policyType": { "target": "com.amazonaws.cloudwatchlogs#PolicyType", "traits": { - "smithy.api#documentation": "Use this parameter to limit the returned policies to only the policies that match the policy type that you\n specify. Currently, the only valid value is DATA_PROTECTION_POLICY
.
Use this parameter to limit the returned policies to only the policies that match the policy type that you\n specify.
", "smithy.api#required": {} } }, @@ -6744,6 +6750,12 @@ "traits": { "smithy.api#enumValue": "DATA_PROTECTION_POLICY" } + }, + "SUBSCRIPTION_FILTER_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBSCRIPTION_FILTER_POLICY" + } } } }, @@ -6778,7 +6790,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an account-level data protection policy that applies to all log groups in the account. A data protection policy can help safeguard sensitive \n data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only\n one account-level policy.
\nSensitive data is detected and masked when it is ingested into a log group. When you set a \n data protection policy, log events ingested into the log groups before that time are not masked.
\nIf you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups\n and all log groups that are created later in this account. The account policy is applied to existing log groups\n with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.\n A user who has the logs:Unmask
permission can use a \n GetLogEvents or \n FilterLogEvents\n operation with the unmask
parameter set to true
to view the unmasked \n log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs\n console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see\n Protect sensitive log data with masking.
\nTo use the PutAccountPolicy
operation, you must be signed on with the logs:PutDataProtectionPolicy
\n and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can also use \n PutDataProtectionPolicy\n to create a data protection policy that applies to just one log group. \n If a log group has its own data protection policy and \n the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.
Creates an account-level data protection policy or subscription filter policy that applies to all log groups \n or a subset of log groups in the account.
\n\n Data protection policy\n
\nA data protection policy can help safeguard sensitive \n data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only\n one account-level data protection policy.
\nSensitive data is detected and masked when it is ingested into a log group. When you set a \n data protection policy, log events ingested into the log groups before that time are not masked.
\nIf you use PutAccountPolicy
to create a data protection policy for your whole account, it applies to both existing log groups\n and all log groups that are created later in this account. The account-level policy is applied to existing log groups\n with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks.\n A user who has the logs:Unmask
permission can use a \n GetLogEvents or \n FilterLogEvents\n operation with the unmask
parameter set to true
to view the unmasked \n log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs\n console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see\n Protect sensitive log data with masking.
\nTo use the PutAccountPolicy
operation for a data protection policy, you must be signed on with \n the logs:PutDataProtectionPolicy
\n and logs:PutAccountPolicy
permissions.
The PutAccountPolicy
operation applies to all log groups in the account. You can use \n PutDataProtectionPolicy\n to create a data protection policy that applies to just one log group. \n If a log group has its own data protection policy and \n the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term\n specified in either policy is masked.
\n Subscription filter policy\n
\nA subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services.\n Account-level subscription filter policies apply to both existing log groups and log groups that are created later in \n this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and \n Lambda. When log events are sent to the receiving service, they are Base64 encoded and \n compressed with the GZIP format.
\nThe following destinations are supported for subscription filters:
\nAn Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
\nAn Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
\nA Lambda function in the same account as the subscription policy, for same-account delivery.
\nA logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
\nEach account can have one account-level subscription filter policy. \n If you are updating an existing filter, you must specify the correct name in PolicyName
.\n To perform a PutAccountPolicy
subscription filter operation for any destination except a Lambda \n function, you must also have the iam:PassRole
permission.
Specify the data protection policy, in JSON.
\nThis policy must include two JSON blocks:
\nThe first block must include both a DataIdentifer
array and an \n Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that\n you want to mask. For more information about the available options, see \n Types of data that you can mask.
The Operation
property with an Audit
action is required to find the \n sensitive data terms. This Audit
action must contain a FindingsDestination
\n object. You can optionally use that FindingsDestination
object to list one or more \n destinations to send audit findings to. If you specify destinations such as log groups, \n Kinesis Data Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an\n Operation
property with an Deidentify
action. The\n DataIdentifer
array must exactly match the DataIdentifer
array\n in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the \n data, and it must \n contain the \n \"MaskConfig\": {}
object. The \n \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
\nThe contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
,\n Description
, and Version
fields. The Name
is different than the \n operation's policyName
parameter, and is used as a dimension when\n CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters.
Specify the policy, in JSON.
\n\n Data protection policy\n
\nA data protection policy must include two JSON blocks:
\nThe first block must include both a DataIdentifer
array and an \n Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that\n you want to mask. For more information about the available options, see \n Types of data that you can mask.
The Operation
property with an Audit
action is required to find the \n sensitive data terms. This Audit
action must contain a FindingsDestination
\n object. You can optionally use that FindingsDestination
object to list one or more \n destinations to send audit findings to. If you specify destinations such as log groups, \n Kinesis Data Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an\n Operation
property with an Deidentify
action. The\n DataIdentifer
array must exactly match the DataIdentifer
array\n in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the \n data, and it must \n contain the \n \"MaskConfig\": {}
object. The \n \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
\nThe contents of the two DataIdentifer
arrays must match exactly.
In addition to the two JSON blocks, the policyDocument
can also include Name
,\n Description
, and Version
fields. The Name
is different than the \n operation's policyName
parameter, and is used as a dimension when\n CloudWatch Logs reports audit findings metrics to CloudWatch.
The JSON specified in policyDocument
can be up to 30,720 characters long.
\n Subscription filter policy\n
\nA subscription filter policy can include the following attributes in a JSON block:
\n\n DestinationArn The ARN of the destination\n to deliver log events to. Supported destinations are:
\nAn Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
\nAn Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.
\nA Lambda function in the same account as the subscription policy, for same-account delivery.
\nA logical destination in a different account created with PutDestination, for cross-account\n delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.
\n\n RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log\n events to the destination stream. You don't need to provide the ARN when you are working with\n a logical destination for cross-account delivery.
\n\n FilterPattern A filter pattern for subscribing to a \n filtered stream of log events.
\n\n DistributionThe method used to distribute log data to the destination. \n By default, log data is\n grouped by log stream, but the grouping can be set to Random
for a more even distribution.\n This property is only applicable when the destination is an Kinesis Data Streams data stream.
Currently the only valid value for this parameter is DATA_PROTECTION_POLICY
.
The type of policy that you're creating or updating.
", "smithy.api#required": {} } }, @@ -6810,6 +6822,12 @@ "traits": { "smithy.api#documentation": "Currently the only valid value for this parameter is ALL
, which specifies that the data \n protection policy applies to all log groups in the account. If you omit this parameter, the default\n of ALL
is used.
Use this parameter to apply the subscription filter policy to a subset of log groups in the account.\n Currently, the only supported filter is LogGroupName NOT IN []
. The selectionCriteria
\n string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.
Using the selectionCriteria
parameter is useful to help prevent infinite loops. \n For more information, see Log recursion prevention.
Specifing selectionCriteria
is valid only when you specify SUBSCRIPTION_FILTER_POLICY
\n for policyType
.
Starts a Live Tail streaming session for one or more log groups. A Live Tail session returns a stream of \n log events that have\n been recently ingested in the log groups. For more information, see \n Use Live Tail to view logs in near real time.\n
\nThe response to this operation is a response stream, over which \n the server sends live log events and the client receives them.
\nThe following objects are sent over the stream:
\nA single LiveTailSessionStart \n object is sent at the start of the session.
\nEvery second, a LiveTailSessionUpdate\n object is sent. Each of these objects contains an array of the actual log events.
\nIf no new log events were ingested in the past second, the \n LiveTailSessionUpdate
object will contain an empty array.
The array of log events contained in a LiveTailSessionUpdate
can include\n as many as 500 log events. If the number of log events matching the request exceeds 500 per second, the\n log events are sampled down to 500 log events to be included in each LiveTailSessionUpdate
object.
If your client consumes the log events slower than the server produces them, CloudWatch Logs\n buffers up to 10 LiveTailSessionUpdate
events or 5000 log events, after \n which it starts dropping the oldest events.
A SessionStreamingException \n object is returned if an unknown error occurs on the server side.
\nA SessionTimeoutException \n object is returned when the session times out, after it has been kept open for three hours.
\nYou can end a session before it times out by closing the session stream or by closing the client that is receiving the \n stream. The session also ends if the established connection between the client and the server breaks.
\nStarts a Live Tail streaming session for one or more log groups. A Live Tail session returns a stream of \n log events that have\n been recently ingested in the log groups. For more information, see \n Use Live Tail to view logs in near real time.\n
\nThe response to this operation is a response stream, over which \n the server sends live log events and the client receives them.
\nThe following objects are sent over the stream:
\nA single LiveTailSessionStart \n object is sent at the start of the session.
\nEvery second, a LiveTailSessionUpdate\n object is sent. Each of these objects contains an array of the actual log events.
\nIf no new log events were ingested in the past second, the \n LiveTailSessionUpdate
object will contain an empty array.
The array of log events contained in a LiveTailSessionUpdate
can include\n as many as 500 log events. If the number of log events matching the request exceeds 500 per second, the\n log events are sampled down to 500 log events to be included in each LiveTailSessionUpdate
object.
If your client consumes the log events slower than the server produces them, CloudWatch Logs\n buffers up to 10 LiveTailSessionUpdate
events or 5000 log events, after \n which it starts dropping the oldest events.
A SessionStreamingException \n object is returned if an unknown error occurs on the server side.
\nA SessionTimeoutException \n object is returned when the session times out, after it has been kept open for three hours.
\nYou can end a session before it times out by closing the session stream or by closing the client that is receiving the \n stream. The session also ends if the established connection between the client and the server breaks.
\nFor examples of using an SDK to start a Live Tail session, see \n \n Start a Live Tail session using an Amazon Web Services SDK.
", "smithy.api#endpoint": { "hostPrefix": "streaming-" } @@ -8405,13 +8426,13 @@ "logStreamNames": { "target": "com.amazonaws.cloudwatchlogs#InputLogStreamNames", "traits": { - "smithy.api#documentation": "If you specify this parameter, then only log events in the log streams that you specify here are \n included in the Live Tail session.
\nYou can specify this parameter only if you specify only one log group in logGroupIdentifiers
.
If you specify this parameter, then only log events in the log streams that you specify here are \n included in the Live Tail session.
\nIf you specify this field, you can't also specify the logStreamNamePrefixes
field.
You can specify this parameter only if you specify only one log group in logGroupIdentifiers
.
If you specify this parameter, then only log events in the log streams that have names that start with the \n prefixes that you specify here are \n included in the Live Tail session.
\nYou can specify this parameter only if you specify only one log group in logGroupIdentifiers
.
If you specify this parameter, then only log events in the log streams that have names that start with the \n prefixes that you specify here are \n included in the Live Tail session.
\nIf you specify this field, you can't also specify the logStreamNames
field.
You can specify this parameter only if you specify only one log group in logGroupIdentifiers
.