From 9d5e5d99364bce7b10479eb43f5b9176237524d4 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Tue, 19 Mar 2024 14:27:24 -0400 Subject: [PATCH] Release v1.51.3 (2024-03-19) (#5202) Release v1.51.3 (2024-03-19) === ### Service Client Updates * `service/cloudformation`: Updates service documentation * Documentation update, March 2024. Corrects some formatting. * `service/ec2`: Updates service API, documentation, and paginators * This release adds the new DescribeMacHosts API operation for getting information about EC2 Mac Dedicated Hosts. Users can now see the latest macOS versions that their underlying Apple Mac can support without needing to be updated. * `service/finspace`: Updates service API and documentation * `service/logs`: Updates service API and documentation * Update LogSamples field in Anomaly model to be a list of LogEvent * `service/managedblockchain-query`: Updates service API, documentation, and paginators --- CHANGELOG.md | 13 + aws/version.go | 2 +- .../cloudformation/2010-05-15/docs-2.json | 88 +-- models/apis/ec2/2016-11-15/api-2.json | 69 ++ models/apis/ec2/2016-11-15/docs-2.json | 41 ++ models/apis/ec2/2016-11-15/paginators-1.json | 6 + models/apis/finspace/2021-03-12/api-2.json | 25 +- models/apis/finspace/2021-03-12/docs-2.json | 34 +- models/apis/logs/2014-03-28/api-2.json | 7 +- models/apis/logs/2014-03-28/docs-2.json | 38 +- .../2023-05-04/api-2.json | 108 ++- .../2023-05-04/docs-2.json | 120 +++- .../2023-05-04/paginators-1.json | 6 + service/cloudwatchlogs/api.go | 124 +++- service/ec2/api.go | 292 ++++++++ service/ec2/ec2iface/interface.go | 7 + service/finspace/api.go | 239 ++++-- service/managedblockchainquery/api.go | 678 +++++++++++++++++- .../managedblockchainqueryiface/interface.go | 7 + 19 files changed, 1663 insertions(+), 241 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index df6ea35a6cf..f41179b4b34 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 
+1,16 @@ +Release v1.51.3 (2024-03-19) +=== + +### Service Client Updates +* `service/cloudformation`: Updates service documentation + * Documentation update, March 2024. Corrects some formatting. +* `service/ec2`: Updates service API, documentation, and paginators + * This release adds the new DescribeMacHosts API operation for getting information about EC2 Mac Dedicated Hosts. Users can now see the latest macOS versions that their underlying Apple Mac can support without needing to be updated. +* `service/finspace`: Updates service API and documentation +* `service/logs`: Updates service API and documentation + * Update LogSamples field in Anomaly model to be a list of LogEvent +* `service/managedblockchain-query`: Updates service API, documentation, and paginators + Release v1.51.2 (2024-03-18) === diff --git a/aws/version.go b/aws/version.go index 32e856c7bed..28f1c9a02f1 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.51.2" +const SDKVersion = "1.51.3" diff --git a/models/apis/cloudformation/2010-05-15/docs-2.json b/models/apis/cloudformation/2010-05-15/docs-2.json index eafcd587716..2c24eb5ae14 100644 --- a/models/apis/cloudformation/2010-05-15/docs-2.json +++ b/models/apis/cloudformation/2010-05-15/docs-2.json @@ -3,7 +3,7 @@ "service": "CloudFormation

CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure.

With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.

For more information about CloudFormation, see the CloudFormation product page.

CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com.

", "operations": { "ActivateOrganizationsAccess": "

Activate trusted access with Organizations. With trusted access between StackSets and Organizations activated, the management account has permissions to create and manage StackSets for your organization.

", - "ActivateType": "

Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide.

Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

", + "ActivateType": "

Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide.

Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

", "BatchDescribeTypeConfigurations": "

Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and Region.

For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

", "CancelUpdateStack": "

Cancels an update on the specified stack. If the call completes successfully, the stack rolls back the update and reverts to the previous stack configuration.

You can cancel only stacks that are in the UPDATE_IN_PROGRESS state.

", "ContinueUpdateRollback": "

For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again.

A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll back all changes after a failed stack update. For example, you might have a stack that's rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail.

", @@ -19,13 +19,13 @@ "DeleteStack": "

Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks don't show up in the DescribeStacks operation if the deletion has been completed successfully.

", "DeleteStackInstances": "

Deletes stack instances for the specified accounts, in the specified Amazon Web Services Regions.

", "DeleteStackSet": "

Deletes a stack set. Before you can delete a stack set, all its member stack instances must be deleted. For more information about how to complete this, see DeleteStackInstances.

", - "DeregisterType": "

Marks an extension or extension version as DEPRECATED in the CloudFormation registry, removing it from active use. Deprecated extensions or extension versions cannot be used in CloudFormation operations.

To deregister an entire extension, you must individually deregister all active versions of that extension. If an extension has only a single active version, deregistering that version results in the extension itself being deregistered and marked as deprecated in the registry.

You can't deregister the default version of an extension if there are other active version of that extension. If you do deregister the default version of an extension, the extension type itself is deregistered as well and marked as deprecated.

To view the deprecation status of an extension or extension version, use DescribeType .

", + "DeregisterType": "

Marks an extension or extension version as DEPRECATED in the CloudFormation registry, removing it from active use. Deprecated extensions or extension versions cannot be used in CloudFormation operations.

To deregister an entire extension, you must individually deregister all active versions of that extension. If an extension has only a single active version, deregistering that version results in the extension itself being deregistered and marked as deprecated in the registry.

You can't deregister the default version of an extension if there are other active versions of that extension. If you do deregister the default version of an extension, the extension type itself is deregistered as well and marked as deprecated.

To view the deprecation status of an extension or extension version, use DescribeType.

", "DescribeAccountLimits": "

Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see CloudFormation Quotas in the CloudFormation User Guide.

", "DescribeChangeSet": "

Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the CloudFormation User Guide.

", "DescribeChangeSetHooks": "

Returns hook-related information for the change set and a list of changes that CloudFormation makes when you run the change set.

", "DescribeGeneratedTemplate": "

Describes a generated template. The output includes details about the progress of the creation of a generated template started by a CreateGeneratedTemplate API action or the update of a generated template started with an UpdateGeneratedTemplate API action.

", "DescribeOrganizationsAccess": "

Retrieves information about the account's OrganizationAccess status. This API can be called either by the management account or the delegated administrator by using the CallAs parameter. This API can also be called without the CallAs parameter by the management account.

", - "DescribePublisher": "

Returns information about a CloudFormation extension publisher.

If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account.

For more information about registering as a publisher, see:

", + "DescribePublisher": "

Returns information about a CloudFormation extension publisher.

If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account.

For more information about registering as a publisher, see:

", "DescribeResourceScan": "

Describes details of a resource scan.

", "DescribeStackDriftDetectionStatus": "

Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

", "DescribeStackEvents": "

Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, see CloudFormation stack creation events in the CloudFormation User Guide.

You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID).

", @@ -66,21 +66,21 @@ "ListTypeRegistrations": "

Returns a list of registration tokens for the specified extension(s).

", "ListTypeVersions": "

Returns summary information about the versions of an extension.

", "ListTypes": "

Returns summary information about extensions that have been registered with CloudFormation.

", - "PublishType": "

Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher .

", + "PublishType": "

Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher.

", "RecordHandlerProgress": "

Reports progress of a resource handler to CloudFormation.

Reserved for use by the CloudFormation CLI. Don't use this API in your code.

", "RegisterPublisher": "

Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all Amazon Web Services Regions.

For information about requirements for registering as a public extension publisher, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide.

", - "RegisterType": "

Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes:

For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.

You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary.

Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request.

Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

", + "RegisterType": "

Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes:

For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.

You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary.

Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request.

Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

", "RollbackStack": "

When specifying RollbackStack, you preserve the state of previously provisioned resources when an operation fails. You can check the status of the stack through the DescribeStacks operation.

Rolls back the specified stack to the last known stable state from CREATE_FAILED or UPDATE_FAILED stack statuses.

This operation will delete a stack if it doesn't contain a last known stable state. A last known stable state includes any status in a *_COMPLETE. This includes the following stack statuses.

", "SetStackPolicy": "

Sets a stack policy for a specified stack.

", - "SetTypeConfiguration": "

Specifies the configuration data for a registered CloudFormation extension, in the given account and Region.

To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType . For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide.

", + "SetTypeConfiguration": "

Specifies the configuration data for a registered CloudFormation extension, in the given account and Region.

To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide.

", "SetTypeDefaultVersion": "

Specify the default version of an extension. The default version of an extension will be used in CloudFormation operations.

", "SignalResource": "

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource operation in conjunction with a creation policy or update policy. CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource operation is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

", "StartResourceScan": "

Starts a scan of the resources in this account in this Region. You can view the status of a scan using the ListResourceScans API action.

", "StopStackSetOperation": "

Stops an in-progress operation on a stack set and its associated stack instances. StackSets will cancel all the unstarted stack instance deployments and wait for those that are in-progress to complete.

", - "TestType": "

Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.

For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.

If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing.

To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType .

Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension.

An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

", + "TestType": "

Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.

For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.

If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing.

To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType.

Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension.

An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

", "UpdateGeneratedTemplate": "

Updates a generated template. This can be used to change the name, add and remove resources, refresh resources, and change the DeletionPolicy and UpdateReplacePolicy settings. You can check the status of the update to the generated template using the DescribeGeneratedTemplate API action.

", "UpdateStack": "

Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack through the DescribeStacks action.

To get a copy of the template for an existing stack, you can use the GetTemplate action.

For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.

", - "UpdateStackInstances": "

Updates the parameter values for stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region.

You can only update stack instances in Amazon Web Services Regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances .

During stack set updates, any parameters overridden for a stack instance aren't updated, but retain their overridden value.

You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

", + "UpdateStackInstances": "

Updates the parameter values for stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region.

You can only update stack instances in Amazon Web Services Regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances.

During stack set updates, any parameters overridden for a stack instance aren't updated, but retain their overridden value.

You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

", "UpdateStackSet": "

Updates the stack set, and associated stack instances in the specified accounts and Amazon Web Services Regions.

Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

", "UpdateTerminationProtection": "

Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide.

For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.

", "ValidateTemplate": "

Validates a specified template. CloudFormation first checks if the template is valid JSON. If it isn't, CloudFormation checks if the template is valid YAML. If both these checks fail, CloudFormation returns a template validation error.

" @@ -294,15 +294,15 @@ "Capabilities": { "base": null, "refs": { - "CreateChangeSetInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

Only one of the Capabilities and ResourceType parameters can be specified.

", - "CreateStackInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

Only one of the Capabilities and ResourceType parameters can be specified.

", - "CreateStackSetInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.

", + "CreateChangeSetInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

Only one of the Capabilities and ResourceType parameters can be specified.

", + "CreateStackInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

Only one of the Capabilities and ResourceType parameters can be specified.

", + "CreateStackSetInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.

", "DescribeChangeSetOutput$Capabilities": "

If you execute the change set, the list of capabilities that were explicitly acknowledged when the change set was created.

", "GetTemplateSummaryOutput$Capabilities": "

The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in CloudFormation Templates.

", "Stack$Capabilities": "

The capabilities allowed in the stack.

", "StackSet$Capabilities": "

The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your Amazon Web Services account—for example, by creating new Identity and Access Management (IAM) users. For more information, see Acknowledging IAM Resources in CloudFormation Templates.

", - "UpdateStackInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.

Only one of the Capabilities and ResourceType parameters can be specified.

", - "UpdateStackSetInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.

", + "UpdateStackInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.

Only one of the Capabilities and ResourceType parameters can be specified.

", + "UpdateStackSetInput$Capabilities": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.

", "ValidateTemplateOutput$Capabilities": "

The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error.

For more information, see Acknowledging IAM Resources in CloudFormation Templates.

" } }, @@ -520,7 +520,7 @@ "ConfigurationSchema": { "base": null, "refs": { - "DescribeTypeOutput$ConfigurationSchema": "

A JSON string that represent the current configuration data for the extension in this account and Region.

To set the configuration data for an extension, use SetTypeConfiguration . For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

" + "DescribeTypeOutput$ConfigurationSchema": "

A JSON string that represent the current configuration data for the extension in this account and Region.

To set the configuration data for an extension, use SetTypeConfiguration. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

" } }, "ConnectionArn": { @@ -951,7 +951,7 @@ "base": null, "refs": { "CreateStackInput$DisableRollback": "

Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure, but not both.

Default: false

", - "ExecuteChangeSetInput$DisableRollback": "

Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure parameter to the CreateChangeSet API operation was specified.

Default: True

", + "ExecuteChangeSetInput$DisableRollback": "

Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure parameter to the CreateChangeSet API operation was specified.

Default: True

", "Stack$DisableRollback": "

Boolean to enable or disable rollback on stack creation failures:

", "UpdateStackInput$DisableRollback": "

Preserve the state of previously provisioned resources when an operation fails.

Default: False

" } @@ -1079,7 +1079,7 @@ "GeneratedTemplateDeletionPolicy": { "base": null, "refs": { - "TemplateConfiguration$DeletionPolicy": "

The DeletionPolicy assigned to resources in the generated template. Supported values are:

For more information, see DeletionPolicy attribute in the CloudFormation User Guide.

" + "TemplateConfiguration$DeletionPolicy": "

The DeletionPolicy assigned to resources in the generated template. Supported values are:

For more information, see DeletionPolicy attribute in the CloudFormation User Guide.

" } }, "GeneratedTemplateId": { @@ -1126,7 +1126,7 @@ "GeneratedTemplateUpdateReplacePolicy": { "base": null, "refs": { - "TemplateConfiguration$UpdateReplacePolicy": "

The UpdateReplacePolicy assigned to resources in the generated template. Supported values are:

For more information, see UpdateReplacePolicy attribute in the CloudFormation User Guide.

" + "TemplateConfiguration$UpdateReplacePolicy": "

The UpdateReplacePolicy assigned to resources in the generated template. Supported values are:

For more information, see UpdateReplacePolicy attribute in the CloudFormation User Guide.

" } }, "GetGeneratedTemplateInput": { @@ -1581,7 +1581,7 @@ "base": "

Contains logging configuration information for an extension.

", "refs": { "ActivateTypeInput$LoggingConfig": "

Contains logging configuration information for an extension.

", - "DescribeTypeOutput$LoggingConfig": "

Contains logging configuration information for private extensions. This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null. For more information, see RegisterType .

", + "DescribeTypeOutput$LoggingConfig": "

Contains logging configuration information for private extensions. This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null. For more information, see RegisterType.

", "RegisterTypeInput$LoggingConfig": "

Specifies logging configuration information for an extension.

" } }, @@ -1696,7 +1696,7 @@ "MonitoringTimeInMinutes": { "base": null, "refs": { - "RollbackConfiguration$MonitoringTimeInMinutes": "

The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.

The default is 0 minutes.

If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack , for example) as necessary.

If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.

" + "RollbackConfiguration$MonitoringTimeInMinutes": "

The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.

The default is 0 minutes.

If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack, for example) as necessary.

If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.

" } }, "NameAlreadyExistsException": { @@ -1740,7 +1740,7 @@ "ListStackResourcesInput$NextToken": "

A string that identifies the next page of stack resources that you want to retrieve.

", "ListStackResourcesOutput$NextToken": "

If the output exceeds 1 MB, a string that identifies the next page of stack resources. If no additional page exists, this value is null.

", "ListStackSetAutoDeploymentTargetsInput$NextToken": "

A string that identifies the next page of stack set deployment targets that you want to retrieve.

", - "ListStackSetAutoDeploymentTargetsOutput$NextToken": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListStackSetAutoDeploymentTargets again and use that value for the NextToken parameter. If the request returns all results, NextToken is set to an empty string.

", + "ListStackSetAutoDeploymentTargetsOutput$NextToken": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListStackSetAutoDeploymentTargets again and use that value for the NextToken parameter. If the request returns all results, NextToken is set to an empty string.

", "ListStackSetOperationResultsInput$NextToken": "

If the previous request didn't return all the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSetOperationResults again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", "ListStackSetOperationResultsOutput$NextToken": "

If the request doesn't return all results, NextToken is set to a token. To retrieve the next set of results, call ListOperationResults again and assign that token to the request object's NextToken parameter. If there are no remaining results, NextToken is set to null.

", "ListStackSetOperationsInput$NextToken": "

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSetOperations again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", @@ -1795,8 +1795,8 @@ "OnStackFailure": { "base": null, "refs": { - "CreateChangeSetInput$OnStackFailure": "

Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

For nested stacks, when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted.

", - "DescribeChangeSetOutput$OnStackFailure": "

Determines what action will be taken if stack creation fails. When this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

" + "CreateChangeSetInput$OnStackFailure": "

Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

For nested stacks, when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted.

", + "DescribeChangeSetOutput$OnStackFailure": "

Determines what action will be taken if stack creation fails. When this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:

" } }, "OperationIdAlreadyExistsException": { @@ -1867,10 +1867,10 @@ "base": null, "refs": { "OrganizationalUnitIdList$member": null, - "StackInstance$OrganizationalUnitId": "

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets .

", - "StackInstanceSummary$OrganizationalUnitId": "

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets .

", + "StackInstance$OrganizationalUnitId": "

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

", + "StackInstanceSummary$OrganizationalUnitId": "

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

", "StackSetAutoDeploymentTargetSummary$OrganizationalUnitId": "

The organization root ID or organizational unit (OU) IDs where the stack set is targeted.

", - "StackSetOperationResultSummary$OrganizationalUnitId": "

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets .

" + "StackSetOperationResultSummary$OrganizationalUnitId": "

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, "OrganizationalUnitIdList": { @@ -1878,7 +1878,7 @@ "refs": { "DeploymentTargets$OrganizationalUnitIds": "

The organization root ID or organizational unit (OU) IDs to which StackSets deploys.

", "ImportStacksToStackSetInput$OrganizationalUnitIds": "

The list of OU IDs to which the stacks being imported have to be mapped as deployment targets.

", - "StackSet$OrganizationalUnitIds": "

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets .

" + "StackSet$OrganizationalUnitIds": "

[Service-managed permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.

" } }, "Output": { @@ -1956,16 +1956,16 @@ "base": null, "refs": { "CreateChangeSetInput$Parameters": "

A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type.

", - "CreateStackInput$Parameters": "

A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

", - "CreateStackInstancesInput$ParameterOverrides": "

A list of stack set parameters whose values you want to override in the selected stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance operations:

During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template.

", + "CreateStackInput$Parameters": "

A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

", + "CreateStackInstancesInput$ParameterOverrides": "

A list of stack set parameters whose values you want to override in the selected stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance operations:

During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template.

", "CreateStackSetInput$Parameters": "

The input parameters for the stack set template.

", - "DescribeChangeSetOutput$Parameters": "

A list of Parameter structures that describes the input parameters and their values used to create the change set. For more information, see the Parameter data type.

", + "DescribeChangeSetOutput$Parameters": "

A list of Parameter structures that describes the input parameters and their values used to create the change set. For more information, see the Parameter data type.

", "EstimateTemplateCostInput$Parameters": "

A list of Parameter structures that specify input parameters.

", "Stack$Parameters": "

A list of Parameter structures.

", "StackInstance$ParameterOverrides": "

A list of parameters from the stack set template whose values have been overridden in this stack instance.

", "StackSet$Parameters": "

A list of input parameters for a stack set.

", - "UpdateStackInput$Parameters": "

A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

", - "UpdateStackInstancesInput$ParameterOverrides": "

A list of input parameters whose values you want to update for the specified stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance update operations:

During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

", + "UpdateStackInput$Parameters": "

A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.

", + "UpdateStackInstancesInput$ParameterOverrides": "

A list of input parameters whose values you want to update for the specified stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance update operations:

During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

", "UpdateStackSetInput$Parameters": "

A list of input parameters for the stack set template.

" } }, @@ -2634,7 +2634,7 @@ "base": null, "refs": { "ActivateTypeInput$ExecutionRoleArn": "

The name of the IAM execution role to use to activate the extension.

", - "DescribeTypeOutput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM execution role used to register the extension. This applies only to private extensions you have registered in your account. For more information, see RegisterType .

If the registered extension calls any Amazon Web Services APIs, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your extension with the appropriate credentials.

", + "DescribeTypeOutput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM execution role used to register the extension. This applies only to private extensions you have registered in your account. For more information, see RegisterType.

If the registered extension calls any Amazon Web Services APIs, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your extension with the appropriate credentials.

", "LoggingConfig$LogRoleArn": "

The Amazon Resource Name (ARN) of the role that CloudFormation should assume when sending log entries to CloudWatch Logs.

", "RegisterTypeInput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension.

For CloudFormation to assume the specified execution role, the role must contain a trust relationship with the CloudFormation service principal (resources.cloudformation.amazonaws.com). For more information about adding trust relationships, see Modifying a role trust policy in the Identity and Access Management User Guide.

If your extension calls Amazon Web Services APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials.

" } @@ -3099,7 +3099,7 @@ } }, "StackSetAutoDeploymentTargetSummary": { - "base": "

One of the targets for the stack set. Returned by the ListStackSetAutoDeploymentTargets API operation.

", + "base": "

One of the targets for the stack set. Returned by the ListStackSetAutoDeploymentTargets API operation.

", "refs": { "StackSetAutoDeploymentTargetSummaries$member": null } @@ -3531,7 +3531,7 @@ "DescribeResourceScanOutput$StartTime": "

The time that the resource scan was started.

", "DescribeResourceScanOutput$EndTime": "

The time that the resource scan was finished.

", "DescribeStackDriftDetectionStatusOutput$Timestamp": "

Time at which the stack drift detection operation was initiated.

", - "DescribeTypeOutput$LastUpdated": "

When the specified extension version was registered. This applies only to:

", + "DescribeTypeOutput$LastUpdated": "

When the specified extension version was registered. This applies only to:

", "DescribeTypeOutput$TimeCreated": "

When the specified private extension version was registered or activated in your account.

", "ResourceScanSummary$StartTime": "

The time that the resource scan was started.

", "ResourceScanSummary$EndTime": "

The time that the resource scan was finished.

", @@ -3554,7 +3554,7 @@ "StackSetOperationSummary$EndTimestamp": "

The time at which the stack set operation ended, across all accounts and Regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or Region.

", "StackSetSummary$LastDriftCheckTimestamp": "

Most recent time when CloudFormation performed a drift detection operation on the stack set. This value will be NULL for any stack set on which drift detection hasn't yet been performed.

", "TypeConfigurationDetails$LastUpdated": "

When the configuration data was last updated for this extension.

If a configuration hasn't been set for a specified extension, CloudFormation returns null.

", - "TypeSummary$LastUpdated": "

When the specified extension version was registered. This applies only to:

For all other extension types, CloudFormation returns null.

", + "TypeSummary$LastUpdated": "

When the specified extension version was registered. This applies only to:

For all other extension types, CloudFormation returns null.

", "TypeVersionSummary$TimeCreated": "

When the version was registered.

" } }, @@ -3597,7 +3597,7 @@ "Type": { "base": null, "refs": { - "RollbackTrigger$Type": "

The resource type of the rollback trigger. Specify either AWS::CloudWatch::Alarm or AWS::CloudWatch::CompositeAlarm resource types.

" + "RollbackTrigger$Type": "

The resource type of the rollback trigger. Specify either AWS::CloudWatch::Alarm or AWS::CloudWatch::CompositeAlarm resource types.

" } }, "TypeArn": { @@ -3611,11 +3611,11 @@ "ListTypeRegistrationsInput$TypeArn": "

The Amazon Resource Name (ARN) of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

", "ListTypeVersionsInput$Arn": "

The Amazon Resource Name (ARN) of the extension for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

", "PublishTypeOutput$PublicTypeArn": "

The Amazon Resource Name (ARN) assigned to the public extension upon publication.

", - "SetTypeConfigurationInput$TypeArn": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.

Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version.

", + "SetTypeConfigurationInput$TypeArn": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.

Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version.

", "TestTypeInput$Arn": "

The Amazon Resource Name (ARN) of the extension.

Conditional: You must specify Arn, or TypeName and Type.

", "TestTypeOutput$TypeVersionArn": "

The Amazon Resource Name (ARN) of the extension.

", - "TypeConfigurationDetails$TypeArn": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.

", - "TypeConfigurationIdentifier$TypeArn": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.

", + "TypeConfigurationDetails$TypeArn": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.

", + "TypeConfigurationIdentifier$TypeArn": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.

", "TypeSummary$TypeArn": "

The Amazon Resource Name (ARN) of the extension.

", "TypeVersionSummary$Arn": "

The Amazon Resource Name (ARN) of the extension version.

" } @@ -3623,7 +3623,7 @@ "TypeConfiguration": { "base": null, "refs": { - "SetTypeConfigurationInput$Configuration": "

The configuration data for the extension, in this account and Region.

The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType . For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide.

", + "SetTypeConfigurationInput$Configuration": "

The configuration data for the extension, in this account and Region.

The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide.

", "TypeConfigurationDetails$Configuration": "

A JSON string specifying the configuration data for the extension, in this account and Region.

If a configuration hasn't been set for a specified extension, CloudFormation returns {}.

" } }, @@ -3694,7 +3694,7 @@ "DeactivateTypeInput$TypeName": "

The type name of the extension, in this account and Region. If you specified a type name alias when enabling the extension, use the type name alias.

Conditional: You must specify either Arn, or TypeName and Type.

", "DeregisterTypeInput$TypeName": "

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

", "DescribeTypeInput$TypeName": "

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

", - "DescribeTypeOutput$TypeName": "

The name of the extension.

If the extension is a public third-party type you have activated with a type name alias, CloudFormation returns the type name alias. For more information, see ActivateType .

", + "DescribeTypeOutput$TypeName": "

The name of the extension.

If the extension is a public third-party type you have activated with a type name alias, CloudFormation returns the type name alias. For more information, see ActivateType.

", "DescribeTypeOutput$OriginalTypeName": "

For public extensions that have been activated for this account and Region, the type name of the public extension.

If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide.

", "ListTypeRegistrationsInput$TypeName": "

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

", "ListTypeVersionsInput$TypeName": "

The name of the extension for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

", @@ -3707,7 +3707,7 @@ "TestTypeInput$TypeName": "

The name of the extension to test.

Conditional: You must specify Arn, or TypeName and Type.

", "TypeConfigurationDetails$TypeName": "

The name of the extension.

", "TypeConfigurationIdentifier$TypeName": "

The name of the extension type to which this configuration applies.

", - "TypeSummary$TypeName": "

The name of the extension.

If you specified a TypeNameAlias when you call the ActivateType API operation in your account and Region, CloudFormation considers that alias as the type name.

", + "TypeSummary$TypeName": "

The name of the extension.

If you specified a TypeNameAlias when you called the ActivateType API operation in your account and Region, CloudFormation treats that alias as the type name.

", "TypeSummary$OriginalTypeName": "

For public extensions that have been activated for this account and Region, the type name of the public extension.

If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide.

", "TypeVersionSummary$TypeName": "

The name of the extension.

" } @@ -3758,10 +3758,10 @@ "refs": { "DeregisterTypeInput$VersionId": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

", "DescribeTypeInput$VersionId": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

If you specify a VersionId, DescribeType returns information about that specific extension version. Otherwise, it returns information about the default extension version.

", - "DescribeTypeOutput$DefaultVersionId": "

The ID of the default version of the extension. The default version is used when the extension version isn't specified.

This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null. For more information, see RegisterType .

To set the default version of an extension, use SetTypeDefaultVersion.

", + "DescribeTypeOutput$DefaultVersionId": "

The ID of the default version of the extension. The default version is used when the extension version isn't specified.

This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null. For more information, see RegisterType.

To set the default version of an extension, use SetTypeDefaultVersion.

", "SetTypeDefaultVersionInput$VersionId": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

", "TestTypeInput$VersionId": "

The version of the extension to test.

You can specify the version id with either Arn, or with TypeName and Type.

If you don't specify a version, CloudFormation uses the default version of the extension in this account and Region for testing.

", - "TypeSummary$DefaultVersionId": "

The ID of the default version of the extension. The default version is used when the extension version isn't specified.

This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon and published by third parties, CloudFormation returns null. For more information, see RegisterType .

To set the default version of an extension, use SetTypeDefaultVersion.

", + "TypeSummary$DefaultVersionId": "

The ID of the default version of the extension. The default version is used when the extension version isn't specified.

This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon and published by third parties, CloudFormation returns null. For more information, see RegisterType.

To set the default version of an extension, use SetTypeDefaultVersion.

", "TypeVersionSummary$VersionId": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it's registered.

" } }, diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 823b7ba567e..6782d0810a2 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -2613,6 +2613,15 @@ "input":{"shape":"DescribeLockedSnapshotsRequest"}, "output":{"shape":"DescribeLockedSnapshotsResult"} }, + "DescribeMacHosts":{ + "name":"DescribeMacHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMacHostsRequest"}, + "output":{"shape":"DescribeMacHostsResult"} + }, "DescribeManagedPrefixLists":{ "name":"DescribeManagedPrefixLists", "http":{ @@ -17015,6 +17024,39 @@ } } }, + "DescribeMacHostsRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "HostIds":{ + "shape":"RequestHostIdList", + "locationName":"HostId" + }, + "MaxResults":{"shape":"DescribeMacHostsRequestMaxResults"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeMacHostsRequestMaxResults":{ + "type":"integer", + "max":500, + "min":5 + }, + "DescribeMacHostsResult":{ + "type":"structure", + "members":{ + "MacHosts":{ + "shape":"MacHostList", + "locationName":"macHostSet" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, "DescribeManagedPrefixListsRequest":{ "type":"structure", "members":{ @@ -32002,6 +32044,33 @@ ] }, "Long":{"type":"long"}, + "MacHost":{ + "type":"structure", + "members":{ + "HostId":{ + "shape":"DedicatedHostId", + "locationName":"hostId" + }, + "MacOSLatestSupportedVersions":{ + "shape":"MacOSVersionStringList", + "locationName":"macOSLatestSupportedVersionSet" + } + } + }, + "MacHostList":{ + "type":"list", + "member":{ + "shape":"MacHost", + "locationName":"item" + } + }, + "MacOSVersionStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"item" + } + }, "MaintenanceDetails":{ "type":"structure", "members":{ diff --git a/models/apis/ec2/2016-11-15/docs-2.json 
b/models/apis/ec2/2016-11-15/docs-2.json index b0aa5545a73..78b1ab8cb7a 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -294,6 +294,7 @@ "DescribeLocalGatewayVirtualInterfaces": "

Describes the specified local gateway virtual interfaces.

", "DescribeLocalGateways": "

Describes one or more local gateways. By default, all local gateways are described. Alternatively, you can filter the results.

", "DescribeLockedSnapshots": "

Describes the lock status for a snapshot.

", + "DescribeMacHosts": "

Describes the specified EC2 Mac Dedicated Host or all of your EC2 Mac Dedicated Hosts.

", "DescribeManagedPrefixLists": "

Describes your managed prefix lists and any Amazon Web Services-managed prefix lists.

To view the entries for your prefix list, use GetManagedPrefixListEntries.

", "DescribeMovingAddresses": "

This action is deprecated.

Describes your Elastic IP addresses that are being moved from or being restored to the EC2-Classic platform. This request does not return information about any other Elastic IP addresses in your account.

", "DescribeNatGateways": "

Describes one or more of your NAT gateways.

", @@ -5164,6 +5165,7 @@ "refs": { "DedicatedHostIdList$member": null, "LaunchTemplatePlacementRequest$HostId": "

The ID of the Dedicated Host for the instance.

", + "MacHost$HostId": "

The EC2 Mac Dedicated Host ID.

", "ModifyInstancePlacementRequest$HostId": "

The ID of the Dedicated Host with which to associate the instance.

", "RequestHostIdList$member": null, "RequestHostIdSet$member": null @@ -6964,6 +6966,22 @@ "refs": { } }, + "DescribeMacHostsRequest": { + "base": null, + "refs": { + } + }, + "DescribeMacHostsRequestMaxResults": { + "base": null, + "refs": { + "DescribeMacHostsRequest$MaxResults": "

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

" + } + }, + "DescribeMacHostsResult": { + "base": null, + "refs": { + } + }, "DescribeManagedPrefixListsRequest": { "base": null, "refs": { @@ -9433,6 +9451,7 @@ "DescribeLocalGatewayVirtualInterfacesRequest$Filters": "

One or more filters.

", "DescribeLocalGatewaysRequest$Filters": "

One or more filters.

", "DescribeLockedSnapshotsRequest$Filters": "

The filters.

", + "DescribeMacHostsRequest$Filters": "

The filters.

", "DescribeManagedPrefixListsRequest$Filters": "

One or more filters.

", "DescribeMovingAddressesRequest$Filters": "

One or more filters.

", "DescribeNatGatewaysRequest$Filter": "

The filters.

", @@ -14283,6 +14302,24 @@ "VpnGateway$AmazonSideAsn": "

The private Autonomous System Number (ASN) for the Amazon side of a BGP session.

" } }, + "MacHost": { + "base": "

Information about the EC2 Mac Dedicated Host.

", + "refs": { + "MacHostList$member": null + } + }, + "MacHostList": { + "base": null, + "refs": { + "DescribeMacHostsResult$MacHosts": "

Information about the EC2 Mac Dedicated Hosts.

" + } + }, + "MacOSVersionStringList": { + "base": null, + "refs": { + "MacHost$MacOSLatestSupportedVersions": "

The latest macOS versions that the EC2 Mac Dedicated Host can launch without being upgraded.

" + } + }, "MaintenanceDetails": { "base": "

Details for Site-to-Site VPN tunnel endpoint maintenance events.

", "refs": { @@ -17413,6 +17450,7 @@ "base": null, "refs": { "DescribeHostsRequest$HostIds": "

The IDs of the Dedicated Hosts. The IDs are used for targeted instance launches.

", + "DescribeMacHostsRequest$HostIds": "

The IDs of the EC2 Mac Dedicated Hosts.

", "ModifyHostsRequest$HostIds": "

The IDs of the Dedicated Hosts to modify.

", "ReleaseHostsRequest$HostIds": "

The IDs of the Dedicated Hosts to release.

" } @@ -19744,6 +19782,8 @@ "DescribeLocalGatewaysResult$NextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "DescribeLockedSnapshotsRequest$NextToken": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

", "DescribeLockedSnapshotsResult$NextToken": "

The token to include in another request to get the next page of items. This value is null when there are no more items to return.

", + "DescribeMacHostsRequest$NextToken": "

The token to use to retrieve the next page of results.

", + "DescribeMacHostsResult$NextToken": "

The token to use to retrieve the next page of results.

", "DescribeMovingAddressesRequest$NextToken": "

The token for the next page of results.

", "DescribeMovingAddressesResult$NextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "DescribeNatGatewaysRequest$NextToken": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

", @@ -20453,6 +20493,7 @@ "LockSnapshotResult$SnapshotId": "

The ID of the snapshot

", "LockedSnapshotsInfo$OwnerId": "

The account ID of the Amazon Web Services account that owns the snapshot.

", "LockedSnapshotsInfo$SnapshotId": "

The ID of the snapshot.

", + "MacOSVersionStringList$member": null, "MaintenanceDetails$PendingMaintenance": "

Verify existence of a pending maintenance.

", "ManagedPrefixList$AddressFamily": "

The IP address version.

", "ManagedPrefixList$StateMessage": "

The state message.

", diff --git a/models/apis/ec2/2016-11-15/paginators-1.json b/models/apis/ec2/2016-11-15/paginators-1.json index d0520c22a85..ea572e61e86 100755 --- a/models/apis/ec2/2016-11-15/paginators-1.json +++ b/models/apis/ec2/2016-11-15/paginators-1.json @@ -342,6 +342,12 @@ "output_token": "NextToken", "result_key": "LocalGateways" }, + "DescribeMacHosts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MacHosts" + }, "DescribeManagedPrefixLists": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/models/apis/finspace/2021-03-12/api-2.json b/models/apis/finspace/2021-03-12/api-2.json index 6ae0719865d..9592cea24a1 100644 --- a/models/apis/finspace/2021-03-12/api-2.json +++ b/models/apis/finspace/2021-03-12/api-2.json @@ -895,7 +895,12 @@ "max":100, "min":1 }, - "AvailabilityZoneId":{"type":"string"}, + "AvailabilityZoneId":{ + "type":"string", + "max":12, + "min":8, + "pattern":"^[a-zA-Z0-9-]+$" + }, "AvailabilityZoneIds":{ "type":"list", "member":{"shape":"AvailabilityZoneId"} @@ -939,7 +944,8 @@ "ChangesetId":{ "type":"string", "max":26, - "min":1 + "min":1, + "pattern":"^[a-zA-Z0-9]+$" }, "ChangesetStatus":{ "type":"string", @@ -1182,6 +1188,7 @@ "changesetId":{"shape":"ChangesetId"}, "segmentConfigurations":{"shape":"KxDataviewSegmentConfigurationList"}, "autoUpdate":{"shape":"booleanValue"}, + "readWrite":{"shape":"booleanValue"}, "description":{"shape":"Description"}, "tags":{"shape":"TagMap"}, "clientToken":{ @@ -1202,6 +1209,7 @@ "segmentConfigurations":{"shape":"KxDataviewSegmentConfigurationList"}, "description":{"shape":"Description"}, "autoUpdate":{"shape":"booleanValue"}, + "readWrite":{"shape":"booleanValue"}, "createdTimestamp":{"shape":"Timestamp"}, "lastModifiedTimestamp":{"shape":"Timestamp"}, "status":{"shape":"KxDataviewStatus"} @@ -1969,6 +1977,7 @@ "activeVersions":{"shape":"KxDataviewActiveVersionList"}, "description":{"shape":"Description"}, 
"autoUpdate":{"shape":"booleanValue"}, + "readWrite":{"shape":"booleanValue"}, "environmentId":{"shape":"EnvironmentId"}, "createdTimestamp":{"shape":"Timestamp"}, "lastModifiedTimestamp":{"shape":"Timestamp"}, @@ -2313,13 +2322,13 @@ }, "KxCommandLineArgumentKey":{ "type":"string", - "max":50, + "max":1024, "min":1, "pattern":"^(?![Aa][Ww][Ss])(s|([a-zA-Z][a-zA-Z0-9_]+))|(AWS_ZIP_DEFAULT)" }, "KxCommandLineArgumentValue":{ "type":"string", - "max":50, + "max":1024, "min":1, "pattern":"^[a-zA-Z0-9_:./,]+$" }, @@ -2407,6 +2416,7 @@ "status":{"shape":"KxDataviewStatus"}, "description":{"shape":"Description"}, "autoUpdate":{"shape":"booleanValue"}, + "readWrite":{"shape":"booleanValue"}, "createdTimestamp":{"shape":"Timestamp"}, "lastModifiedTimestamp":{"shape":"Timestamp"}, "statusReason":{"shape":"KxDataviewStatusReason"} @@ -2426,7 +2436,8 @@ ], "members":{ "dbPaths":{"shape":"SegmentConfigurationDbPathList"}, - "volumeName":{"shape":"KxVolumeName"} + "volumeName":{"shape":"KxVolumeName"}, + "onDemand":{"shape":"booleanValue"} } }, "KxDataviewSegmentConfigurationList":{ @@ -2522,7 +2533,6 @@ }, "KxNAS1Size":{ "type":"integer", - "max":33600, "min":1200 }, "KxNAS1Type":{ @@ -3160,7 +3170,7 @@ "type":"string", "max":1093, "min":9, - "pattern":"^s3:\\/\\/[a-z0-9][a-z0-9-]{1,61}[a-z0-9]\\/([^\\/]+\\/)*[^\\/]*$" + "pattern":"^s3:\\/\\/[a-z0-9][a-z0-9-.]{1,61}[a-z0-9]\\/([^\\/]+\\/)*[^\\/]*$" }, "SamlMetadataDocument":{ "type":"string", @@ -3498,6 +3508,7 @@ "activeVersions":{"shape":"KxDataviewActiveVersionList"}, "status":{"shape":"KxDataviewStatus"}, "autoUpdate":{"shape":"booleanValue"}, + "readWrite":{"shape":"booleanValue"}, "description":{"shape":"Description"}, "createdTimestamp":{"shape":"Timestamp"}, "lastModifiedTimestamp":{"shape":"Timestamp"} diff --git a/models/apis/finspace/2021-03-12/docs-2.json b/models/apis/finspace/2021-03-12/docs-2.json index 058237c95eb..744976afd14 100644 --- a/models/apis/finspace/2021-03-12/docs-2.json +++ 
b/models/apis/finspace/2021-03-12/docs-2.json @@ -901,18 +901,18 @@ "refs": { "CreateKxClusterRequest$azMode": "

The number of availability zones you want to assign per cluster. This can be one of the following

", "CreateKxClusterResponse$azMode": "

The number of availability zones you want to assign per cluster. This can be one of the following

", - "CreateKxDataviewRequest$azMode": "

The number of availability zones you want to assign per cluster. This can be one of the following

", - "CreateKxDataviewResponse$azMode": "

The number of availability zones you want to assign per cluster. This can be one of the following

", - "CreateKxVolumeRequest$azMode": "

The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE for volumes.

", - "CreateKxVolumeResponse$azMode": "

The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE for volumes.

", + "CreateKxDataviewRequest$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", + "CreateKxDataviewResponse$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", + "CreateKxVolumeRequest$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", + "CreateKxVolumeResponse$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", "GetKxClusterResponse$azMode": "

The number of availability zones you want to assign per cluster. This can be one of the following

", - "GetKxDataviewResponse$azMode": "

The number of availability zones you want to assign per cluster. This can be one of the following

", - "GetKxVolumeResponse$azMode": "

The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE for volumes.

", + "GetKxDataviewResponse$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", + "GetKxVolumeResponse$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", "KxCluster$azMode": "

The number of availability zones assigned per cluster. This can be one of the following:

", - "KxDataviewListEntry$azMode": "

The number of availability zones you want to assign per cluster. This can be one of the following

", - "KxVolume$azMode": "

The number of availability zones assigned to the volume. Currently, only SINGLE is supported.

", - "UpdateKxDataviewResponse$azMode": "

The number of availability zones you want to assign per cluster. This can be one of the following

", - "UpdateKxVolumeResponse$azMode": "

The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE for volumes.

" + "KxDataviewListEntry$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", + "KxVolume$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", + "UpdateKxDataviewResponse$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

", + "UpdateKxVolumeResponse$azMode": "

The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places dataview in a single AZ.

" } }, "KxCacheStorageConfiguration": { @@ -1262,10 +1262,10 @@ "KxHostType": { "base": null, "refs": { - "CreateKxScalingGroupRequest$hostType": "

The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.

", + "CreateKxScalingGroupRequest$hostType": "

The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.

You can add one of the following values:

", "CreateKxScalingGroupResponse$hostType": "

The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.

", - "GetKxScalingGroupResponse$hostType": "

The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.

", - "KxScalingGroup$hostType": "

The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.

" + "GetKxScalingGroupResponse$hostType": "

The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.

It can have one of the following values:

", + "KxScalingGroup$hostType": "

The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.

You can add one of the following values:

" } }, "KxNAS1Configuration": { @@ -2129,10 +2129,16 @@ "base": null, "refs": { "CreateKxDataviewRequest$autoUpdate": "

The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false.

", + "CreateKxDataviewRequest$readWrite": "

The option to specify whether you want to make the dataview writable to perform database maintenance. The following are some considerations related to writable dataviews.



", "CreateKxDataviewResponse$autoUpdate": "

The option to select whether you want to apply all the future additions and corrections automatically to the dataview when you ingest new changesets. The default value is false.

", + "CreateKxDataviewResponse$readWrite": "

Returns True if the dataview is created as writeable and False otherwise.

", "GetKxDataviewResponse$autoUpdate": "

The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when new changesets are ingested. The default value is false.

", + "GetKxDataviewResponse$readWrite": "

Returns True if the dataview is created as writeable and False otherwise.

", "KxDataviewListEntry$autoUpdate": "

The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when you ingest new changesets. The default value is false.

", - "UpdateKxDataviewResponse$autoUpdate": "

The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when new changesets are ingested. The default value is false.

" + "KxDataviewListEntry$readWrite": "

Returns True if the dataview is created as writeable and False otherwise.

", + "KxDataviewSegmentConfiguration$onDemand": "

Enables on-demand caching on the selected database path when a particular file or a column of the database is accessed. When on demand caching is True, dataviews perform minimal loading of files on the filesystem as needed. When it is set to False, everything is cached. The default value is False.

", + "UpdateKxDataviewResponse$autoUpdate": "

The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when new changesets are ingested. The default value is false.

", + "UpdateKxDataviewResponse$readWrite": "

Returns True if the dataview is created as writeable and False otherwise.

" } }, "dnsStatus": { diff --git a/models/apis/logs/2014-03-28/api-2.json b/models/apis/logs/2014-03-28/api-2.json index 72a8ee58a39..48174c7e8f6 100644 --- a/models/apis/logs/2014-03-28/api-2.json +++ b/models/apis/logs/2014-03-28/api-2.json @@ -2338,8 +2338,11 @@ "event":true }, "LogEvent":{ - "type":"string", - "min":1 + "type":"structure", + "members":{ + "timestamp":{"shape":"Timestamp"}, + "message":{"shape":"EventMessage"} + } }, "LogEventIndex":{"type":"integer"}, "LogGroup":{ diff --git a/models/apis/logs/2014-03-28/docs-2.json b/models/apis/logs/2014-03-28/docs-2.json index 714cd5a9cca..3761615478e 100644 --- a/models/apis/logs/2014-03-28/docs-2.json +++ b/models/apis/logs/2014-03-28/docs-2.json @@ -4,7 +4,7 @@ "operations": { "AssociateKmsKey": "

Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.

When you use AssociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation.

If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.

CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.

It can take up to 5 minutes for this operation to take effect.

If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error.

", "CancelExportTask": "

Cancels the specified export task.

The task must be in the PENDING or RUNNING state.

", - "CreateDelivery": "

Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created.

Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

You can't update an existing delivery. You can only create and delete deliveries.

", + "CreateDelivery": "

Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created.

Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

You can't update an existing delivery. You can only create and delete deliveries.

", "CreateExportTask": "

Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.

Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.

Exporting to S3 buckets that are encrypted with AES-256 is supported.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.

", "CreateLogAnomalyDetector": "

Creates an anomaly detector that regularly scans one or more log groups and look for patterns and anomalies in the logs.

An anomaly detector can help surface issues by automatically discovering anomalies in your log event traffic. An anomaly detector uses machine learning algorithms to scan log events and find patterns. A pattern is a shared text structure that recurs among your log fields. Patterns provide a useful tool for analyzing large sets of logs because a large number of log events can often be compressed into a few patterns.

The anomaly detector uses pattern recognition to find anomalies, which are unusual log events. It uses the evaluationFrequency to compare current log events and patterns with trained baselines.

Fields within a pattern are called tokens. Fields that vary within a pattern, such as a request ID or timestamp, are referred to as dynamic tokens and represented by <*>.

The following is an example of a pattern:

[INFO] Request time: <*> ms

This pattern represents log events like [INFO] Request time: 327 ms and other similar log events that differ only by the number, in this case 327. When the pattern is displayed, the different numbers are replaced by <*>

Any parts of log events that are masked as sensitive data are not scanned for anomalies. For more information about masking sensitive data, see Help protect sensitive log data with masking.

", "CreateLogGroup": "

Creates a log group with the specified name. You can create up to 1,000,000 log groups per Region per account.

You must use the following guidelines when naming a log group:

When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.

If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.

If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error.

CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.

", @@ -25,7 +25,7 @@ "DeleteRetentionPolicy": "

Deletes the specified retention policy.

Log events do not expire if they belong to log groups without a retention policy.

", "DeleteSubscriptionFilter": "

Deletes the specified subscription filter.

", "DescribeAccountPolicies": "

Returns a list of all CloudWatch Logs account policies in the account.

", - "DescribeDeliveries": "

Retrieves a list of the deliveries that have been created in the account.

A delivery is a connection between a delivery source and a delivery destination .

A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.

", + "DescribeDeliveries": "

Retrieves a list of the deliveries that have been created in the account.

A delivery is a connection between a delivery source and a delivery destination .

A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.

", "DescribeDeliveryDestinations": "

Retrieves a list of the delivery destinations that have been created in the account.

", "DescribeDeliverySources": "

Retrieves a list of the delivery sources that have been created in the account.

", "DescribeDestinations": "

Lists all your destinations. The results are ASCII-sorted by destination name.

", @@ -40,7 +40,7 @@ "DisassociateKmsKey": "

Disassociates the specified KMS key from the specified log group or from all CloudWatch Logs Insights query results in the account.

When you use DisassociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation.

It can take up to 5 minutes for this operation to take effect.

", "FilterLogEvents": "

Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.

You must have the logs:FilterLogEvents permission to perform this operation.

You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.

By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token.

The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

", "GetDataProtectionPolicy": "

Returns information about a log group data protection policy.

", - "GetDelivery": "

Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination .

A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.

You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.

", + "GetDelivery": "

Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination .

A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.

You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.

", "GetDeliveryDestination": "

Retrieves complete information about one delivery destination.

", "GetDeliveryDestinationPolicy": "

Retrieves the delivery destination policy assigned to the delivery destination that you specify. For more information about delivery destinations and their policies, see PutDeliveryDestinationPolicy.

", "GetDeliverySource": "

Retrieves complete information about one delivery source.

", @@ -53,11 +53,11 @@ "ListLogAnomalyDetectors": "

Retrieves a list of the log anomaly detectors in the account.

", "ListTagsForResource": "

Displays the tags associated with a CloudWatch Logs resource. Currently, log groups and destinations support tagging.

", "ListTagsLogGroup": "

The ListTagsLogGroup operation is on the path to deprecation. We recommend that you use ListTagsForResource instead.

Lists the tags for the specified log group.

", - "PutAccountPolicy": "

Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account.

Data protection policy

A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.

Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

Subscription filter policy

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

", + "PutAccountPolicy": "

Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account.

Data protection policy

A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.

Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

Subscription filter policy

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

", "PutDataProtectionPolicy": "

Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data.

Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

The PutDataProtectionPolicy operation applies to only the specified log group. You can also use PutAccountPolicy to create an account-level data protection policy that applies to all log groups in the account, including both existing log groups and log groups that are created later. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

", - "PutDeliveryDestination": "

Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Kinesis Data Firehose are supported as logs delivery destinations.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.

", + "PutDeliveryDestination": "

Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.

", "PutDeliveryDestinationPolicy": "

Creates and assigns an IAM policy that grants permissions to CloudWatch Logs to deliver logs cross-account to a specified destination in this account. To configure the delivery of logs from an Amazon Web Services service in another account to a logs delivery destination in the current account, you must do the following:

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

The contents of the policy must include two statements. One statement enables general logs delivery, and the other allows delivery to the chosen destination. See the examples for the needed policies.

", - "PutDeliverySource": "

Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.

To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.

", + "PutDeliverySource": "

Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.

To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.

", "PutDestination": "

Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.

A destination encapsulates a physical resource (such as an Amazon Kinesis stream). With a destination, you can subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents.

Through an access policy, a destination controls what is written to it. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

To perform a PutDestination operation, you must also have the iam:PassRole permission.

", "PutDestinationPolicy": "

Creates or updates an access policy associated with an existing destination. An access policy is an IAM policy document that is used to authorize claims to register a subscription filter against a given destination.

", "PutLogEvents": "

Uploads a batch of log events to the specified log stream.

The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream.

The batch of events must satisfy the following constraints:

If a call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.

", @@ -65,7 +65,7 @@ "PutQueryDefinition": "

Creates or updates a query definition for CloudWatch Logs Insights. For more information, see Analyzing Log Data with CloudWatch Logs Insights.

To update a query definition, specify its queryDefinitionId in your request. The values of name, queryString, and logGroupNames are changed to the values that you specify in your update operation. No current values are retained from the current query definition. For example, imagine updating a current query definition that includes log groups. If you don't specify the logGroupNames parameter in your update operation, the query definition changes to contain no log groups.

You must have the logs:PutQueryDefinition permission to be able to perform this operation.

", "PutResourcePolicy": "

Creates or updates a resource policy allowing other Amazon Web Services services to put log events to this account, such as Amazon Route 53. An account can have up to 10 resource policies per Amazon Web Services Region.

", "PutRetentionPolicy": "

Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group.

CloudWatch Logs doesn’t immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.

To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven’t been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted.

When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the storedBytes value to see how many bytes a log group is storing.

", - "PutSubscriptionFilter": "

Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.

To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

", + "PutSubscriptionFilter": "

Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName.

To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

", "StartLiveTail": "

Starts a Live Tail streaming session for one or more log groups. A Live Tail session returns a stream of log events that have been recently ingested in the log groups. For more information, see Use Live Tail to view logs in near real time.

The response to this operation is a response stream, over which the server sends live log events and the client receives them.

The following objects are sent over the stream:

You can end a session before it times out by closing the session stream or by closing the client that is receiving the stream. The session also ends if the established connection between the client and the server breaks.

For examples of using an SDK to start a Live Tail session, see Start a Live Tail session using an Amazon Web Services SDK.

", "StartQuery": "

Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.

For more information, see CloudWatch Logs Insights Query Syntax.

After you run a query using StartQuery, the query results are stored by CloudWatch Logs. You can use GetQueryResults to retrieve the results of a query, using the queryId that StartQuery returns.

If you have associated a KMS key with the query results in this account, then StartQuery uses that key to encrypt the results when it stores them. If no key is associated with query results, the query results are encrypted with the default CloudWatch Logs encryption method.

Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account.

You can have up to 30 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.

", "StopQuery": "

Stops a CloudWatch Logs Insights query that is in progress. If the query has already ended, the operation returns an error indicating that the specified query is not running.

", @@ -121,7 +121,7 @@ "base": null, "refs": { "AccountPolicy$policyDocument": "

The policy document for this account policy.

The JSON specified in policyDocument can be up to 30,720 characters.

", - "PutAccountPolicyRequest$policyDocument": "

Specify the policy, in JSON.

Data protection policy

A data protection policy must include two JSON blocks:

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifer arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters long.

Subscription filter policy

A subscription filter policy can include the following attributes in a JSON block:

" + "PutAccountPolicyRequest$policyDocument": "

Specify the policy, in JSON.

Data protection policy

A data protection policy must include two JSON blocks:

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifier arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters long.

Subscription filter policy

A subscription filter policy can include the following attributes in a JSON block:

" } }, "AmazonResourceName": { @@ -200,7 +200,7 @@ "Delivery$arn": "

The Amazon Resource Name (ARN) that uniquely identifies this delivery.

", "Delivery$deliveryDestinationArn": "

The ARN of the delivery destination that is associated with this delivery.

", "DeliveryDestination$arn": "

The Amazon Resource Name (ARN) that uniquely identifies this delivery destination.

", - "DeliveryDestinationConfiguration$destinationResourceArn": "

The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.

", + "DeliveryDestinationConfiguration$destinationResourceArn": "

The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.

", "DeliverySource$arn": "

The Amazon Resource Name (ARN) that uniquely identifies this delivery source.

", "Destination$arn": "

The ARN of this destination.

", "LogGroup$arn": "

The Amazon Resource Name (ARN) of the log group. This version of the ARN includes a trailing :* after the log group name.

Use this version to refer to the ARN in IAM policies when specifying permissions for most API actions. The exception is when specifying permissions for TagResource, UntagResource, and ListTagsForResource. The permissions for those three actions require the ARN version that doesn't include a trailing :*.

", @@ -296,7 +296,7 @@ "base": null, "refs": { "GetDataProtectionPolicyResponse$policyDocument": "

The data protection policy document for this log group.

", - "PutDataProtectionPolicyRequest$policyDocument": "

Specify the data protection policy, in JSON.

This policy must include two JSON blocks:

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifer arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters.

", + "PutDataProtectionPolicyRequest$policyDocument": "

Specify the data protection policy, in JSON.

This policy must include two JSON blocks:

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifier arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters.

", "PutDataProtectionPolicyResponse$policyDocument": "

The data protection policy used for this log group.

" } }, @@ -414,7 +414,7 @@ } }, "DeliveryDestination": { - "base": "

This structure contains information about one delivery destination in your account. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, are supported as Kinesis Data Firehose delivery destinations.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

", + "base": "

This structure contains information about one delivery destination in your account. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as delivery destinations.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

", "refs": { "DeliveryDestinations$member": null, "GetDeliveryDestinationResponse$deliveryDestination": "

A structure containing information about the delivery destination.

", @@ -450,8 +450,8 @@ "DeliveryDestinationType": { "base": null, "refs": { - "Delivery$deliveryDestinationType": "

Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.

", - "DeliveryDestination$deliveryDestinationType": "

Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.

" + "Delivery$deliveryDestinationType": "

Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Firehose.

", + "DeliveryDestination$deliveryDestinationType": "

Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Firehose.

" } }, "DeliveryDestinations": { @@ -469,7 +469,7 @@ } }, "DeliverySource": { - "base": "

This structure contains information about one delivery source in your account. A delivery source is an Amazon Web Services resource that sends logs to an Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

", + "base": "

This structure contains information about one delivery source in your account. A delivery source is an Amazon Web Services resource that sends logs to an Amazon Web Services destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.

Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services.

To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:

You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.

", "refs": { "DeliverySources$member": null, "GetDeliverySourceResponse$deliverySource": "

A structure containing information about the delivery source.

", @@ -776,6 +776,7 @@ "FilteredLogEvent$message": "

The data contained in the log event.

", "InputLogEvent$message": "

The raw event message. Each log event can be no larger than 256 KB.

", "LiveTailSessionLogEvent$message": "

The log event message text.

", + "LogEvent$message": "

The message content of the log event.

", "MetricFilterMatchRecord$eventMessage": "

The raw event data.

", "OutputLogEvent$message": "

The data contained in the log event.

", "TestEventMessages$member": null @@ -1218,7 +1219,7 @@ } }, "LogEvent": { - "base": null, + "base": "

This structure contains the information for one sample log event that is associated with an anomaly found by a log anomaly detector.

", "refs": { "LogSamples$member": null } @@ -1226,8 +1227,8 @@ "LogEventIndex": { "base": null, "refs": { - "RejectedLogEventsInfo$tooNewLogEventStartIndex": "

The log events that are too new.

", - "RejectedLogEventsInfo$tooOldLogEventEndIndex": "

The log events that are dated too far in the past.

", + "RejectedLogEventsInfo$tooNewLogEventStartIndex": "

The index of the first log event that is too new. This field is inclusive.

", + "RejectedLogEventsInfo$tooOldLogEventEndIndex": "

The index of the last log event that is too old. This field is exclusive.

", "RejectedLogEventsInfo$expiredLogEventEndIndex": "

The expired log events.

" } }, @@ -1410,7 +1411,7 @@ "base": null, "refs": { "DeliverySource$logType": "

The type of log that the source is sending. For valid values for this parameter, see the documentation for the source service.

", - "PutDeliverySourceRequest$logType": "

Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is EVENT_LOGS.

" + "PutDeliverySourceRequest$logType": "

Defines the type of log that the source is sending.

" } }, "MalformedQueryException": { @@ -2212,6 +2213,7 @@ "InputLogEvent$timestamp": "

The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

", "LiveTailSessionLogEvent$timestamp": "

The timestamp specifying when this log event was created.

", "LiveTailSessionLogEvent$ingestionTime": "

The timestamp specifying when this log event was ingested into the log group.

", + "LogEvent$timestamp": "

The time stamp of the log event.

", "LogGroup$creationTime": "

The creation time of the log group, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

", "LogStream$creationTime": "

The creation time of the stream, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

", "LogStream$firstEventTimestamp": "

The time of the first event, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

", diff --git a/models/apis/managedblockchain-query/2023-05-04/api-2.json b/models/apis/managedblockchain-query/2023-05-04/api-2.json index 36fcb627190..f8c8b852578 100644 --- a/models/apis/managedblockchain-query/2023-05-04/api-2.json +++ b/models/apis/managedblockchain-query/2023-05-04/api-2.json @@ -102,6 +102,23 @@ {"shape":"ServiceQuotaExceededException"} ] }, + "ListFilteredTransactionEvents":{ + "name":"ListFilteredTransactionEvents", + "http":{ + "method":"POST", + "requestUri":"/list-filtered-transaction-events", + "responseCode":200 + }, + "input":{"shape":"ListFilteredTransactionEventsInput"}, + "output":{"shape":"ListFilteredTransactionEventsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ] + }, "ListTokenBalances":{ "name":"ListTokenBalances", "http":{ @@ -167,6 +184,19 @@ }, "exception":true }, + "AddressIdentifierFilter":{ + "type":"structure", + "required":["transactionEventToAddress"], + "members":{ + "transactionEventToAddress":{"shape":"AddressIdentifierFilterTransactionEventToAddressList"} + } + }, + "AddressIdentifierFilterTransactionEventToAddressList":{ + "type":"list", + "member":{"shape":"ChainAddress"}, + "max":1, + "min":1 + }, "AssetContract":{ "type":"structure", "required":[ @@ -267,6 +297,10 @@ "time":{"shape":"Timestamp"} } }, + "Boolean":{ + "type":"boolean", + "box":true + }, "ChainAddress":{ "type":"string", "pattern":"[-A-Za-z0-9]{13,74}" @@ -454,6 +488,48 @@ "nextToken":{"shape":"NextToken"} } }, + "ListFilteredTransactionEventsInput":{ + "type":"structure", + "required":[ + "network", + "addressIdentifierFilter" + ], + "members":{ + "network":{"shape":"String"}, + "addressIdentifierFilter":{"shape":"AddressIdentifierFilter"}, + "timeFilter":{"shape":"TimeFilter"}, + "voutFilter":{"shape":"VoutFilter"}, + 
"confirmationStatusFilter":{"shape":"ConfirmationStatusFilter"}, + "sort":{"shape":"ListFilteredTransactionEventsSort"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"ListFilteredTransactionEventsInputMaxResultsInteger"} + } + }, + "ListFilteredTransactionEventsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":250, + "min":1 + }, + "ListFilteredTransactionEventsOutput":{ + "type":"structure", + "required":["events"], + "members":{ + "events":{"shape":"TransactionEventList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListFilteredTransactionEventsSort":{ + "type":"structure", + "members":{ + "sortBy":{"shape":"ListFilteredTransactionEventsSortBy"}, + "sortOrder":{"shape":"SortOrder"} + } + }, + "ListFilteredTransactionEventsSortBy":{ + "type":"string", + "enum":["blockchainInstant"] + }, "ListTokenBalancesInput":{ "type":"structure", "required":["tokenFilter"], @@ -480,12 +556,10 @@ }, "ListTransactionEventsInput":{ "type":"structure", - "required":[ - "transactionHash", - "network" - ], + "required":["network"], "members":{ "transactionHash":{"shape":"QueryTransactionHash"}, + "transactionId":{"shape":"QueryTransactionId"}, "network":{"shape":"QueryNetwork"}, "nextToken":{"shape":"NextToken"}, "maxResults":{"shape":"ListTransactionEventsInputMaxResultsInteger"} @@ -611,6 +685,10 @@ "type":"string", "pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" }, + "QueryTransactionId":{ + "type":"string", + "pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" + }, "QuotaCode":{"type":"string"}, "ResourceId":{"type":"string"}, "ResourceNotFoundException":{ @@ -690,6 +768,13 @@ "exception":true, "retryable":{"throttling":true} }, + "TimeFilter":{ + "type":"structure", + "members":{ + "from":{"shape":"BlockchainInstant"}, + "to":{"shape":"BlockchainInstant"} + } + }, "Timestamp":{"type":"timestamp"}, "TokenBalance":{ "type":"structure", @@ -779,7 +864,13 @@ "contractAddress":{"shape":"ChainAddress"}, "tokenId":{"shape":"QueryTokenId"}, 
"transactionId":{"shape":"String"}, - "voutIndex":{"shape":"Integer"} + "voutIndex":{"shape":"Integer"}, + "voutSpent":{"shape":"Boolean"}, + "spentVoutTransactionId":{"shape":"String"}, + "spentVoutTransactionHash":{"shape":"String"}, + "spentVoutIndex":{"shape":"Integer"}, + "blockchainInstant":{"shape":"BlockchainInstant"}, + "confirmationStatus":{"shape":"ConfirmationStatus"} } }, "TransactionEventList":{ @@ -848,6 +939,13 @@ "fieldValidationFailed", "other" ] + }, + "VoutFilter":{ + "type":"structure", + "required":["voutSpent"], + "members":{ + "voutSpent":{"shape":"Boolean"} + } } } } diff --git a/models/apis/managedblockchain-query/2023-05-04/docs-2.json b/models/apis/managedblockchain-query/2023-05-04/docs-2.json index efc92f33874..5c55a7f79eb 100644 --- a/models/apis/managedblockchain-query/2023-05-04/docs-2.json +++ b/models/apis/managedblockchain-query/2023-05-04/docs-2.json @@ -7,9 +7,10 @@ "GetTokenBalance": "

Gets the balance of a specific token, including native tokens, for a given address (wallet or contract) on the blockchain.

Only the native tokens BTC and ETH, and the ERC-20, ERC-721, and ERC-1155 token standards are supported.

", "GetTransaction": "

Gets the details of a transaction.

This action will return transaction details for all transactions that are confirmed on the blockchain, even if they have not reached finality.

", "ListAssetContracts": "

Lists all the contracts for a given contract type deployed by an address (either a contract address or a wallet address).

The Bitcoin blockchain networks do not support this operation.

", + "ListFilteredTransactionEvents": "

Lists all the transaction events for an address on the blockchain.

This operation is only supported on the Bitcoin networks.

", "ListTokenBalances": "

This action returns the following for a given blockchain network:

You must always specify the network property of the tokenFilter when using this operation.

", - "ListTransactionEvents": "

An array of TransactionEvent objects. Each object contains details about the transaction event.

This action will return transaction details for all transactions that are confirmed on the blockchain, even if they have not reached finality.

", - "ListTransactions": "

Lists all of the transactions on a given wallet address or to a specific contract.

" + "ListTransactionEvents": "

Lists all the transaction events for a transaction.

This action will return transaction details for all transactions that are confirmed on the blockchain, even if they have not reached finality.

", + "ListTransactions": "

Lists all the transactions on a given wallet address or to a specific contract.

" }, "shapes": { "AccessDeniedException": { @@ -17,6 +18,18 @@ "refs": { } }, + "AddressIdentifierFilter": { + "base": "

This is the container for the unique public address on the blockchain.

", + "refs": { + "ListFilteredTransactionEventsInput$addressIdentifierFilter": "

This is the unique public address on the blockchain for which the transaction events are being requested.

" + } + }, + "AddressIdentifierFilterTransactionEventToAddressList": { + "base": null, + "refs": { + "AddressIdentifierFilter$transactionEventToAddress": "

The container for the recipient address of the transaction.

" + } + }, "AssetContract": { "base": "

This container contains information about a contract.

", "refs": { @@ -87,13 +100,24 @@ "GetTokenBalanceOutput$lastUpdatedTime": null, "ListTransactionsInput$fromBlockchainInstant": null, "ListTransactionsInput$toBlockchainInstant": null, + "TimeFilter$from": null, + "TimeFilter$to": null, "TokenBalance$atBlockchainInstant": "

The time for when the TokenBalance is requested or the current time if a time is not provided in the request.

This time will only be recorded up to the second.

", - "TokenBalance$lastUpdatedTime": "

The Timestamp of the last transaction at which the balance for the token in the wallet was updated.

" + "TokenBalance$lastUpdatedTime": "

The Timestamp of the last transaction at which the balance for the token in the wallet was updated.

", + "TransactionEvent$blockchainInstant": null + } + }, + "Boolean": { + "base": null, + "refs": { + "TransactionEvent$voutSpent": "

Specifies if the transaction output is spent or unspent.

This is only returned for BITCOIN_VOUT event types.

", + "VoutFilter$voutSpent": "

Specifies if the transaction output is spent or unspent.

" } }, "ChainAddress": { "base": null, "refs": { + "AddressIdentifierFilterTransactionEventToAddressList$member": null, "AssetContract$deployerAddress": "

The address of the contract deployer.

", "ContractFilter$deployerAddress": "

The network address of the deployer.

", "ContractIdentifier$contractAddress": "

Container for the blockchain address about a contract.

", @@ -108,7 +132,7 @@ "Transaction$contractAddress": "

The blockchain address for the contract.

", "TransactionEvent$from": "

The wallet address initiating the transaction. It can either be a public key or a contract.

", "TransactionEvent$to": "

The wallet address receiving the transaction. It can either be a public key or a contract.

", - "TransactionEvent$contractAddress": "

The blockchain address. for the contract

" + "TransactionEvent$contractAddress": "

The blockchain address for the contract.

" } }, "ConfirmationStatus": { @@ -116,13 +140,15 @@ "refs": { "ConfirmationStatusIncludeList$member": null, "Transaction$confirmationStatus": "

Specifies whether the transaction has reached Finality.

", + "TransactionEvent$confirmationStatus": "

This container specifies whether the transaction has reached Finality.

", "TransactionOutputItem$confirmationStatus": "

Specifies whether to list transactions that have not reached Finality.

" } }, "ConfirmationStatusFilter": { "base": "

The container for the ConfirmationStatusFilter that filters for the finality of the results.

", "refs": { - "ListTransactionsInput$confirmationStatusFilter": "

This filter is used to include transactions in the response that haven't reached finality . Transactions that have reached finiality are always part of the response.

" + "ListFilteredTransactionEventsInput$confirmationStatusFilter": null, + "ListTransactionsInput$confirmationStatusFilter": "

This filter is used to include transactions in the response that haven't reached finality. Transactions that have reached finality are always part of the response.

" } }, "ConfirmationStatusIncludeList": { @@ -214,10 +240,11 @@ "base": null, "refs": { "ContractMetadata$decimals": "

The decimals used by the token contract.

", - "InternalServerException$retryAfterSeconds": "

The container of the retryAfterSeconds value.

", + "InternalServerException$retryAfterSeconds": "

Specifies the retryAfterSeconds value.

", "ThrottlingException$retryAfterSeconds": "

The container of the retryAfterSeconds value.

", "Transaction$signatureV": "

The signature of the transaction. The Z coordinate of a point V.

", - "TransactionEvent$voutIndex": "

The position of the vout in the transaction output list.

" + "TransactionEvent$voutIndex": "

The position of the transaction output in the transaction output list.

", + "TransactionEvent$spentVoutIndex": "

The position of the spent transaction output in the output list of the creating transaction.

This is only returned for BITCOIN_VIN event types.

" } }, "InternalServerException": { @@ -233,7 +260,7 @@ "ListAssetContractsInputMaxResultsInteger": { "base": null, "refs": { - "ListAssetContractsInput$maxResults": "

The maximum number of contracts to list.

Default:100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" + "ListAssetContractsInput$maxResults": "

The maximum number of contracts to list.

Default: 100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" } }, "ListAssetContractsOutput": { @@ -241,6 +268,34 @@ "refs": { } }, + "ListFilteredTransactionEventsInput": { + "base": null, + "refs": { + } + }, + "ListFilteredTransactionEventsInputMaxResultsInteger": { + "base": null, + "refs": { + "ListFilteredTransactionEventsInput$maxResults": "

The maximum number of transaction events to list.

Default: 100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" + } + }, + "ListFilteredTransactionEventsOutput": { + "base": null, + "refs": { + } + }, + "ListFilteredTransactionEventsSort": { + "base": "

The container for determining how the ListFilteredTransactionEvents results will be sorted.

This operation is only supported on the Bitcoin blockchain networks.

", + "refs": { + "ListFilteredTransactionEventsInput$sort": "

The order by which the results will be sorted.

" + } + }, + "ListFilteredTransactionEventsSortBy": { + "base": null, + "refs": { + "ListFilteredTransactionEventsSort$sortBy": "

The container that determines how the results will be sorted.

" + } + }, "ListTokenBalancesInput": { "base": null, "refs": { @@ -249,7 +304,7 @@ "ListTokenBalancesInputMaxResultsInteger": { "base": null, "refs": { - "ListTokenBalancesInput$maxResults": "

The maximum number of token balances to return.

Default:100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" + "ListTokenBalancesInput$maxResults": "

The maximum number of token balances to return.

Default: 100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" } }, "ListTokenBalancesOutput": { @@ -265,7 +320,7 @@ "ListTransactionEventsInputMaxResultsInteger": { "base": null, "refs": { - "ListTransactionEventsInput$maxResults": "

The maximum number of transaction events to list.

Default:100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" + "ListTransactionEventsInput$maxResults": "

The maximum number of transaction events to list.

Default: 100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" } }, "ListTransactionEventsOutput": { @@ -281,7 +336,7 @@ "ListTransactionsInputMaxResultsInteger": { "base": null, "refs": { - "ListTransactionsInput$maxResults": "

The maximum number of transactions to list.

Default:100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" + "ListTransactionsInput$maxResults": "

The maximum number of transactions to list.

Default: 100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" } }, "ListTransactionsOutput": { @@ -292,7 +347,7 @@ "ListTransactionsSort": { "base": "

The container for determining how the list transaction result will be sorted.

", "refs": { - "ListTransactionsInput$sort": "

The order by which the results will be sorted. If ASCENNDING is selected, the results will be ordered by fromTime.

" + "ListTransactionsInput$sort": "

The order by which the results will be sorted.

" } }, "ListTransactionsSortBy": { @@ -313,6 +368,8 @@ "refs": { "ListAssetContractsInput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", "ListAssetContractsOutput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", + "ListFilteredTransactionEventsInput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", + "ListFilteredTransactionEventsOutput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", "ListTokenBalancesInput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", "ListTokenBalancesOutput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", "ListTransactionEventsInput$nextToken": "

The pagination token that indicates the next set of results to retrieve.

", @@ -328,7 +385,7 @@ } }, "OwnerIdentifier": { - "base": "

The container for the identifier of the owner.

", + "base": "

The container for the owner identifier.

", "refs": { "BatchGetTokenBalanceErrorItem$ownerIdentifier": null, "BatchGetTokenBalanceInputItem$ownerIdentifier": null, @@ -378,11 +435,17 @@ "QueryTransactionHash": { "base": null, "refs": { - "GetTransactionInput$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", - "ListTransactionEventsInput$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", - "Transaction$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", - "TransactionEvent$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", - "TransactionOutputItem$transactionHash": "

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + "GetTransactionInput$transactionHash": "

The hash of a transaction. It is generated when a transaction is created.

", + "ListTransactionEventsInput$transactionHash": "

The hash of a transaction. It is generated when a transaction is created.

", + "Transaction$transactionHash": "

The hash of a transaction. It is generated when a transaction is created.

", + "TransactionEvent$transactionHash": "

The hash of a transaction. It is generated when a transaction is created.

", + "TransactionOutputItem$transactionHash": "

The hash of a transaction. It is generated when a transaction is created.

" + } + }, + "QueryTransactionId": { + "base": null, + "refs": { + "ListTransactionEventsInput$transactionId": "

The identifier of a Bitcoin transaction. It is generated when a transaction is created.

transactionId is only supported on the Bitcoin networks.

" } }, "QuotaCode": { @@ -426,6 +489,7 @@ "SortOrder": { "base": null, "refs": { + "ListFilteredTransactionEventsSort$sortOrder": "

The container for the sort order for ListFilteredTransactionEvents. The SortOrder field only accepts the values ASCENDING and DESCENDING. Not providing SortOrder will default to ASCENDING.

", "ListTransactionsSort$sortOrder": "

The container for the sort order for ListTransactions. The SortOrder field only accepts the values ASCENDING and DESCENDING. Not providing SortOrder will default to ASCENDING.

" } }, @@ -438,6 +502,7 @@ "ContractMetadata$name": "

The name of the token contract.

", "ContractMetadata$symbol": "

The symbol of the token contract.

", "GetTokenBalanceOutput$balance": "

The container for the token balance.

", + "ListFilteredTransactionEventsInput$network": "

The blockchain network where the transaction occurred.

Valid Values: BITCOIN_MAINNET | BITCOIN_TESTNET

", "TokenBalance$balance": "

The container of the token balance.

", "Transaction$blockNumber": "

The block number in which the transaction is recorded.

", "Transaction$gasUsed": "

The amount of gas used for the transaction.

", @@ -446,9 +511,11 @@ "Transaction$signatureR": "

The signature of the transaction. The X coordinate of a point R.

", "Transaction$signatureS": "

The signature of the transaction. The Y coordinate of a point S.

", "Transaction$transactionFee": "

The transaction fee.

", - "Transaction$transactionId": "

The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", + "Transaction$transactionId": "

The identifier of a Bitcoin transaction. It is generated when a transaction is created.

", "TransactionEvent$value": "

The value that was transacted.

", - "TransactionEvent$transactionId": "

The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

", + "TransactionEvent$transactionId": "

The identifier of a Bitcoin transaction. It is generated when a transaction is created.

", + "TransactionEvent$spentVoutTransactionId": "

The transactionId that created the spent transaction output.

This is only returned for BITCOIN_VIN event types.

", + "TransactionEvent$spentVoutTransactionHash": "

The transactionHash that created the spent transaction output.

This is only returned for BITCOIN_VIN event types.

", "ValidationExceptionField$name": "

The name of the field that triggered the ValidationException.

", "ValidationExceptionField$message": "

The ValidationException message.

" } @@ -458,6 +525,12 @@ "refs": { } }, + "TimeFilter": { + "base": "

This container is used to specify a time frame.

", + "refs": { + "ListFilteredTransactionEventsInput$timeFilter": "

This container specifies the time frame for the transaction events returned in the response.

" + } + }, "Timestamp": { "base": null, "refs": { @@ -510,6 +583,7 @@ "TransactionEventList": { "base": null, "refs": { + "ListFilteredTransactionEventsOutput$events": "

The transaction events returned by the request.

", "ListTransactionEventsOutput$events": "

An array of TransactionEvent objects. Each object contains details about the transaction events.

" } }, @@ -547,6 +621,12 @@ "refs": { "ValidationException$reason": "

The container for the reason for the exception.

" } + }, + "VoutFilter": { + "base": "

This container specifies filtering attributes related to BITCOIN_VOUT event types.

", + "refs": { + "ListFilteredTransactionEventsInput$voutFilter": "

This container specifies filtering attributes related to BITCOIN_VOUT event types.

" + } } } } diff --git a/models/apis/managedblockchain-query/2023-05-04/paginators-1.json b/models/apis/managedblockchain-query/2023-05-04/paginators-1.json index 7625c5cab4a..3948bd42661 100644 --- a/models/apis/managedblockchain-query/2023-05-04/paginators-1.json +++ b/models/apis/managedblockchain-query/2023-05-04/paginators-1.json @@ -6,6 +6,12 @@ "limit_key": "maxResults", "result_key": "contracts" }, + "ListFilteredTransactionEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "events" + }, "ListTokenBalances": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/service/cloudwatchlogs/api.go b/service/cloudwatchlogs/api.go index 0e88056423c..93f1dca6feb 100644 --- a/service/cloudwatchlogs/api.go +++ b/service/cloudwatchlogs/api.go @@ -295,7 +295,7 @@ func (c *CloudWatchLogs) CreateDeliveryRequest(input *CreateDeliveryInput) (req // in the table at Enabling logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) // // A delivery destination can represent a log group in CloudWatch Logs, an Amazon -// S3 bucket, or a delivery stream in Kinesis Data Firehose. +// S3 bucket, or a delivery stream in Firehose. // // To configure logs delivery between a supported Amazon Web Services service // and a destination, you must do the following: @@ -2371,9 +2371,9 @@ func (c *CloudWatchLogs) DescribeDeliveriesRequest(input *DescribeDeliveriesInpu // // A delivery source represents an Amazon Web Services resource that sends logs // to an logs delivery destination. The destination can be CloudWatch Logs, -// Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services -// support being configured as a delivery source. These services are listed -// in Enable logging from Amazon Web Services services. 
(https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// Amazon S3, or Firehose. Only some Amazon Web Services services support being +// configured as a delivery source. These services are listed in Enable logging +// from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4256,9 +4256,9 @@ func (c *CloudWatchLogs) GetDeliveryRequest(input *GetDeliveryInput) (req *reque // // A delivery source represents an Amazon Web Services resource that sends logs // to an logs delivery destination. The destination can be CloudWatch Logs, -// Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services -// support being configured as a delivery source. These services are listed -// in Enable logging from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) +// Amazon S3, or Firehose. Only some Amazon Web Services services support being +// configured as a delivery source. These services are listed in Enable logging +// from Amazon Web Services services. (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) // // You need to specify the delivery id in this operation. You can find the IDs // of the deliveries in your account with the DescribeDeliveries (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) @@ -5693,24 +5693,24 @@ func (c *CloudWatchLogs) PutAccountPolicyRequest(input *PutAccountPolicyInput) ( // CloudWatch Logs to other Amazon Web Services services. Account-level subscription // filter policies apply to both existing log groups and log groups that are // created later in this account. 
Supported destinations are Kinesis Data Streams, -// Kinesis Data Firehose, and Lambda. When log events are sent to the receiving -// service, they are Base64 encoded and compressed with the GZIP format. +// Firehose, and Lambda. When log events are sent to the receiving service, +// they are Base64 encoded and compressed with the GZIP format. // // The following destinations are supported for subscription filters: // // - An Kinesis Data Streams data stream in the same account as the subscription // policy, for same-account delivery. // -// - An Kinesis Data Firehose data stream in the same account as the subscription -// policy, for same-account delivery. +// - An Firehose data stream in the same account as the subscription policy, +// for same-account delivery. // // - A Lambda function in the same account as the subscription policy, for // same-account delivery. // // - A logical destination in a different account created with PutDestination // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html), -// for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose -// are supported as logical destinations. +// for cross-account delivery. Kinesis Data Streams and Firehose are supported +// as logical destinations. // // Each account can have one account-level subscription filter policy. If you // are updating an existing filter, you must specify the correct name in PolicyName. @@ -5923,8 +5923,8 @@ func (c *CloudWatchLogs) PutDeliveryDestinationRequest(input *PutDeliveryDestina // // Creates or updates a logical delivery destination. A delivery destination // is an Amazon Web Services resource that represents an Amazon Web Services -// service that logs can be sent to. CloudWatch Logs, Amazon S3, and Kinesis -// Data Firehose are supported as logs delivery destinations. +// service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose +// are supported as logs delivery destinations. 
// // To configure logs delivery between a supported Amazon Web Services service // and a destination, you must do the following: @@ -6166,7 +6166,7 @@ func (c *CloudWatchLogs) PutDeliverySourceRequest(input *PutDeliverySourceInput) // // Creates or updates a logical delivery source. A delivery source represents // an Amazon Web Services resource that sends logs to an logs delivery destination. -// The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. +// The destination can be CloudWatch Logs, Amazon S3, or Firehose. // // To configure logs delivery between a delivery destination and an Amazon Web // Services service that is supported as a delivery source, you must do the @@ -7046,7 +7046,7 @@ func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilt // // - A logical destination created with PutDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) // that belongs to a different account, for cross-account delivery. We currently -// support Kinesis Data Streams and Kinesis Data Firehose as logical destinations. +// support Kinesis Data Streams and Firehose as logical destinations. // // - An Amazon Kinesis Data Firehose delivery stream that belongs to the // same account as the subscription filter, for same-account delivery. @@ -8536,7 +8536,7 @@ type Anomaly struct { // anomaly. // // LogSamples is a required field - LogSamples []*string `locationName:"logSamples" type:"list" required:"true"` + LogSamples []*LogEvent `locationName:"logSamples" type:"list" required:"true"` // The ID of the pattern used to help identify this anomaly. // @@ -8658,7 +8658,7 @@ func (s *Anomaly) SetLogGroupArnList(v []*string) *Anomaly { } // SetLogSamples sets the LogSamples field's value. 
-func (s *Anomaly) SetLogSamples(v []*string) *Anomaly { +func (s *Anomaly) SetLogSamples(v []*LogEvent) *Anomaly { s.LogSamples = v return s } @@ -10944,7 +10944,7 @@ type Delivery struct { DeliveryDestinationArn *string `locationName:"deliveryDestinationArn" type:"string"` // Displays whether the delivery destination associated with this delivery is - // CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. + // CloudWatch Logs, Amazon S3, or Firehose. DeliveryDestinationType *string `locationName:"deliveryDestinationType" type:"string" enum:"DeliveryDestinationType"` // The name of the delivery source that is associated with this delivery. @@ -11014,7 +11014,7 @@ func (s *Delivery) SetTags(v map[string]*string) *Delivery { // This structure contains information about one delivery destination in your // account. A delivery destination is an Amazon Web Services resource that represents // an Amazon Web Services service that logs can be sent to. CloudWatch Logs, -// Amazon S3, are supported as Kinesis Data Firehose delivery destinations. +// Amazon S3, are supported as Firehose delivery destinations. // // To configure logs delivery between a supported Amazon Web Services service // and a destination, you must do the following: @@ -11049,7 +11049,7 @@ type DeliveryDestination struct { DeliveryDestinationConfiguration *DeliveryDestinationConfiguration `locationName:"deliveryDestinationConfiguration" type:"structure"` // Displays whether this delivery destination is CloudWatch Logs, Amazon S3, - // or Kinesis Data Firehose. + // or Firehose. DeliveryDestinationType *string `locationName:"deliveryDestinationType" type:"string" enum:"DeliveryDestinationType"` // The name of this delivery destination. @@ -11122,7 +11122,7 @@ type DeliveryDestinationConfiguration struct { // The ARN of the Amazon Web Services destination that this delivery destination // represents. 
That Amazon Web Services destination can be a log group in CloudWatch - // Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose. + // Logs, an Amazon S3 bucket, or a delivery stream in Firehose. // // DestinationResourceArn is a required field DestinationResourceArn *string `locationName:"destinationResourceArn" type:"string" required:"true"` @@ -11168,7 +11168,7 @@ func (s *DeliveryDestinationConfiguration) SetDestinationResourceArn(v string) * // This structure contains information about one delivery source in your account. // A delivery source is an Amazon Web Services resource that sends logs to an // Amazon Web Services destination. The destination can be CloudWatch Logs, -// Amazon S3, or Kinesis Data Firehose. +// Amazon S3, or Firehose. // // Only some Amazon Web Services services support being configured as a delivery // source. These services are listed as Supported [V2 Permissions] in the table @@ -15693,6 +15693,48 @@ func (s *LiveTailSessionUpdate) MarshalEvent(pm protocol.PayloadMarshaler) (msg return msg, err } +// This structure contains the information for one sample log event that is +// associated with an anomaly found by a log anomaly detector. +type LogEvent struct { + _ struct{} `type:"structure"` + + // The message content of the log event. + Message *string `locationName:"message" min:"1" type:"string"` + + // The time stamp of the log event. + Timestamp *int64 `locationName:"timestamp" type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogEvent) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *LogEvent) SetMessage(v string) *LogEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *LogEvent) SetTimestamp(v int64) *LogEvent { + s.Timestamp = &v + return s +} + // Represents a log group. type LogGroup struct { _ struct{} `type:"structure"` @@ -16556,8 +16598,8 @@ type PutAccountPolicyInput struct { // data terms. This Audit action must contain a FindingsDestination object. // You can optionally use that FindingsDestination object to list one or // more destinations to send audit findings to. If you specify destinations - // such as log groups, Kinesis Data Firehose streams, and S3 buckets, they - // must already exist. + // such as log groups, Firehose streams, and S3 buckets, they must already + // exist. // // * The second block must include both a DataIdentifer array and an Operation // property with an Deidentify action. The DataIdentifer array must exactly @@ -16585,12 +16627,12 @@ type PutAccountPolicyInput struct { // * DestinationArn The ARN of the destination to deliver log events to. // Supported destinations are: An Kinesis Data Streams data stream in the // same account as the subscription policy, for same-account delivery. An - // Kinesis Data Firehose data stream in the same account as the subscription - // policy, for same-account delivery. A Lambda function in the same account - // as the subscription policy, for same-account delivery. A logical destination - // in a different account created with PutDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html), - // for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose - // are supported as logical destinations. 
+ // Firehose data stream in the same account as the subscription policy, for + // same-account delivery. A Lambda function in the same account as the subscription + // policy, for same-account delivery. A logical destination in a different + // account created with PutDestination (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html), + // for cross-account delivery. Kinesis Data Streams and Firehose are supported + // as logical destinations. // // * RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions // to deliver ingested log events to the destination stream. You don't need @@ -16754,8 +16796,8 @@ type PutDataProtectionPolicyInput struct { // data terms. This Audit action must contain a FindingsDestination object. // You can optionally use that FindingsDestination object to list one or // more destinations to send audit findings to. If you specify destinations - // such as log groups, Kinesis Data Firehose streams, and S3 buckets, they - // must already exist. + // such as log groups, Firehose streams, and S3 buckets, they must already + // exist. // // * The second block must include both a DataIdentifer array and an Operation // property with an Deidentify action. The DataIdentifer array must exactly @@ -17102,8 +17144,14 @@ func (s *PutDeliveryDestinationPolicyOutput) SetPolicy(v *Policy) *PutDeliveryDe type PutDeliverySourceInput struct { _ struct{} `type:"structure"` - // Defines the type of log that the source is sending. For Amazon CodeWhisperer, - // the valid value is EVENT_LOGS. + // Defines the type of log that the source is sending. + // + // * For Amazon CodeWhisperer, the valid value is EVENT_LOGS. + // + // * For IAM Identity Centerr, the valid value is ERROR_LOGS. + // + // * For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, + // WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS. 
// // LogType is a required field LogType *string `locationName:"logType" min:"1" type:"string" required:"true"` @@ -18537,10 +18585,10 @@ type RejectedLogEventsInfo struct { // The expired log events. ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` - // The log events that are too new. + // The index of the first log event that is too new. This field is inclusive. TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` - // The log events that are dated too far in the past. + // The index of the last log event that is too old. This field is exclusive. TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` } diff --git a/service/ec2/api.go b/service/ec2/api.go index a1f1cabe70a..dd8b128ffbd 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -26023,6 +26023,137 @@ func (c *EC2) DescribeLockedSnapshotsWithContext(ctx aws.Context, input *Describ return out, req.Send() } +const opDescribeMacHosts = "DescribeMacHosts" + +// DescribeMacHostsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMacHosts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMacHosts for more information on using the DescribeMacHosts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeMacHostsRequest method. 
+// req, resp := client.DescribeMacHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeMacHosts +func (c *EC2) DescribeMacHostsRequest(input *DescribeMacHostsInput) (req *request.Request, output *DescribeMacHostsOutput) { + op := &request.Operation{ + Name: opDescribeMacHosts, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMacHostsInput{} + } + + output = &DescribeMacHostsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMacHosts API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified EC2 Mac Dedicated Host or all of your EC2 Mac Dedicated +// Hosts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeMacHosts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeMacHosts +func (c *EC2) DescribeMacHosts(input *DescribeMacHostsInput) (*DescribeMacHostsOutput, error) { + req, out := c.DescribeMacHostsRequest(input) + return out, req.Send() +} + +// DescribeMacHostsWithContext is the same as DescribeMacHosts with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMacHosts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeMacHostsWithContext(ctx aws.Context, input *DescribeMacHostsInput, opts ...request.Option) (*DescribeMacHostsOutput, error) { + req, out := c.DescribeMacHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeMacHostsPages iterates over the pages of a DescribeMacHosts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMacHosts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMacHosts operation. +// pageNum := 0 +// err := client.DescribeMacHostsPages(params, +// func(page *ec2.DescribeMacHostsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *EC2) DescribeMacHostsPages(input *DescribeMacHostsInput, fn func(*DescribeMacHostsOutput, bool) bool) error { + return c.DescribeMacHostsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMacHostsPagesWithContext same as DescribeMacHostsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeMacHostsPagesWithContext(ctx aws.Context, input *DescribeMacHostsInput, fn func(*DescribeMacHostsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMacHostsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMacHostsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMacHostsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeManagedPrefixLists = "DescribeManagedPrefixLists" // DescribeManagedPrefixListsRequest generates a "aws/request.Request" representing the @@ -101675,6 +101806,125 @@ func (s *DescribeLockedSnapshotsOutput) SetSnapshots(v []*LockedSnapshotsInfo) * return s } +type DescribeMacHostsInput struct { + _ struct{} `type:"structure"` + + // The filters. + // + // * availability-zone - The Availability Zone of the EC2 Mac Dedicated Host. + // + // * instance-type - The instance type size that the EC2 Mac Dedicated Host + // is configured to support. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The IDs of the EC2 Mac Dedicated Hosts. + HostIds []*string `locationName:"HostId" locationNameList:"item" type:"list"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeMacHostsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeMacHostsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMacHostsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMacHostsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeMacHostsInput) SetFilters(v []*Filter) *DescribeMacHostsInput { + s.Filters = v + return s +} + +// SetHostIds sets the HostIds field's value. +func (s *DescribeMacHostsInput) SetHostIds(v []*string) *DescribeMacHostsInput { + s.HostIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeMacHostsInput) SetMaxResults(v int64) *DescribeMacHostsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMacHostsInput) SetNextToken(v string) *DescribeMacHostsInput { + s.NextToken = &v + return s +} + +type DescribeMacHostsOutput struct { + _ struct{} `type:"structure"` + + // Information about the EC2 Mac Dedicated Hosts. + MacHosts []*MacHost `locationName:"macHostSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeMacHostsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeMacHostsOutput) GoString() string { + return s.String() +} + +// SetMacHosts sets the MacHosts field's value. +func (s *DescribeMacHostsOutput) SetMacHosts(v []*MacHost) *DescribeMacHostsOutput { + s.MacHosts = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeMacHostsOutput) SetNextToken(v string) *DescribeMacHostsOutput { + s.NextToken = &v + return s +} + type DescribeManagedPrefixListsInput struct { _ struct{} `type:"structure"` @@ -146297,6 +146547,48 @@ func (s *LockedSnapshotsInfo) SetSnapshotId(v string) *LockedSnapshotsInfo { return s } +// Information about the EC2 Mac Dedicated Host. +type MacHost struct { + _ struct{} `type:"structure"` + + // The EC2 Mac Dedicated Host ID. + HostId *string `locationName:"hostId" type:"string"` + + // The latest macOS versions that the EC2 Mac Dedicated Host can launch without + // being upgraded. + MacOSLatestSupportedVersions []*string `locationName:"macOSLatestSupportedVersionSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MacHost) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MacHost) GoString() string { + return s.String() +} + +// SetHostId sets the HostId field's value. +func (s *MacHost) SetHostId(v string) *MacHost { + s.HostId = &v + return s +} + +// SetMacOSLatestSupportedVersions sets the MacOSLatestSupportedVersions field's value. +func (s *MacHost) SetMacOSLatestSupportedVersions(v []*string) *MacHost { + s.MacOSLatestSupportedVersions = v + return s +} + // Details for Site-to-Site VPN tunnel endpoint maintenance events. type MaintenanceDetails struct { _ struct{} `type:"structure"` diff --git a/service/ec2/ec2iface/interface.go b/service/ec2/ec2iface/interface.go index fac6d14abc0..a3d25b7cf52 100644 --- a/service/ec2/ec2iface/interface.go +++ b/service/ec2/ec2iface/interface.go @@ -1387,6 +1387,13 @@ type EC2API interface { DescribeLockedSnapshotsWithContext(aws.Context, *ec2.DescribeLockedSnapshotsInput, ...request.Option) (*ec2.DescribeLockedSnapshotsOutput, error) DescribeLockedSnapshotsRequest(*ec2.DescribeLockedSnapshotsInput) (*request.Request, *ec2.DescribeLockedSnapshotsOutput) + DescribeMacHosts(*ec2.DescribeMacHostsInput) (*ec2.DescribeMacHostsOutput, error) + DescribeMacHostsWithContext(aws.Context, *ec2.DescribeMacHostsInput, ...request.Option) (*ec2.DescribeMacHostsOutput, error) + DescribeMacHostsRequest(*ec2.DescribeMacHostsInput) (*request.Request, *ec2.DescribeMacHostsOutput) + + DescribeMacHostsPages(*ec2.DescribeMacHostsInput, func(*ec2.DescribeMacHostsOutput, bool) bool) error + DescribeMacHostsPagesWithContext(aws.Context, *ec2.DescribeMacHostsInput, func(*ec2.DescribeMacHostsOutput, bool) bool, ...request.Option) error + DescribeManagedPrefixLists(*ec2.DescribeManagedPrefixListsInput) (*ec2.DescribeManagedPrefixListsOutput, error) DescribeManagedPrefixListsWithContext(aws.Context, 
*ec2.DescribeManagedPrefixListsInput, ...request.Option) (*ec2.DescribeManagedPrefixListsOutput, error) DescribeManagedPrefixListsRequest(*ec2.DescribeManagedPrefixListsInput) (*request.Request, *ec2.DescribeManagedPrefixListsOutput) diff --git a/service/finspace/api.go b/service/finspace/api.go index 2dbea1676d6..140751ad9c4 100644 --- a/service/finspace/api.go +++ b/service/finspace/api.go @@ -6030,7 +6030,7 @@ type CreateKxClusterInput struct { AutoScalingConfiguration *AutoScalingConfiguration `locationName:"autoScalingConfiguration" type:"structure"` // The availability zone identifiers for the requested regions. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` // The number of availability zones you want to assign per cluster. This can // be one of the following @@ -6174,6 +6174,9 @@ func (s CreateKxClusterInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateKxClusterInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateKxClusterInput"} + if s.AvailabilityZoneId != nil && len(*s.AvailabilityZoneId) < 8 { + invalidParams.Add(request.NewErrParamMinLen("AvailabilityZoneId", 8)) + } if s.AzMode == nil { invalidParams.Add(request.NewErrParamRequired("AzMode")) } @@ -6417,7 +6420,7 @@ type CreateKxClusterOutput struct { AutoScalingConfiguration *AutoScalingConfiguration `locationName:"autoScalingConfiguration" type:"structure"` // The availability zone identifiers for the requested regions. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` // The number of availability zones you want to assign per cluster. 
This can // be one of the following @@ -6910,14 +6913,11 @@ type CreateKxDataviewInput struct { AutoUpdate *bool `locationName:"autoUpdate" type:"boolean"` // The identifier of the availability zones. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` - // The number of availability zones you want to assign per cluster. This can - // be one of the following - // - // * SINGLE – Assigns one availability zone per cluster. - // - // * MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. // // AzMode is a required field AzMode *string `locationName:"azMode" type:"string" required:"true" enum:"KxAzMode"` @@ -6947,6 +6947,24 @@ type CreateKxDataviewInput struct { // EnvironmentId is a required field EnvironmentId *string `location:"uri" locationName:"environmentId" min:"1" type:"string" required:"true"` + // The option to specify whether you want to make the dataview writable to perform + // database maintenance. The following are some considerations related to writable + // dataviews. + // + // * You cannot create partial writable dataviews. When you create writeable + // dataviews you must provide the entire database path. + // + // * You cannot perform updates on a writeable dataview. Hence, autoUpdate + // must be set as False if readWrite is True for a dataview. + // + // * You must also use a unique volume for creating a writeable dataview. + // So, if you choose a volume that is already in use by another dataview, + // the dataview creation fails. + // + // * Once you create a dataview as writeable, you cannot change it to read-only. + // So, you cannot update the readWrite parameter later. 
+ ReadWrite *bool `locationName:"readWrite" type:"boolean"` + // The configuration that contains the database path of the data that you want // to place on each selected volume. Each segment must have a unique database // path for each volume. If you do not explicitly specify any database path @@ -6980,6 +6998,9 @@ func (s CreateKxDataviewInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateKxDataviewInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateKxDataviewInput"} + if s.AvailabilityZoneId != nil && len(*s.AvailabilityZoneId) < 8 { + invalidParams.Add(request.NewErrParamMinLen("AvailabilityZoneId", 8)) + } if s.AzMode == nil { invalidParams.Add(request.NewErrParamRequired("AzMode")) } @@ -7084,6 +7105,12 @@ func (s *CreateKxDataviewInput) SetEnvironmentId(v string) *CreateKxDataviewInpu return s } +// SetReadWrite sets the ReadWrite field's value. +func (s *CreateKxDataviewInput) SetReadWrite(v bool) *CreateKxDataviewInput { + s.ReadWrite = &v + return s +} + // SetSegmentConfigurations sets the SegmentConfigurations field's value. func (s *CreateKxDataviewInput) SetSegmentConfigurations(v []*KxDataviewSegmentConfiguration) *CreateKxDataviewInput { s.SegmentConfigurations = v @@ -7105,14 +7132,11 @@ type CreateKxDataviewOutput struct { AutoUpdate *bool `locationName:"autoUpdate" type:"boolean"` // The identifier of the availability zones. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` - // The number of availability zones you want to assign per cluster. This can - // be one of the following - // - // * SINGLE – Assigns one availability zone per cluster. - // - // * MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. 
This places dataview in a single + // AZ. AzMode *string `locationName:"azMode" type:"string" enum:"KxAzMode"` // A unique identifier for the changeset. @@ -7141,6 +7165,9 @@ type CreateKxDataviewOutput struct { // 1, 2021 12:00:00 PM UTC is specified as 1635768000000. LastModifiedTimestamp *time.Time `locationName:"lastModifiedTimestamp" type:"timestamp"` + // Returns True if the dataview is created as writeable and False otherwise. + ReadWrite *bool `locationName:"readWrite" type:"boolean"` + // The configuration that contains the database path of the data that you want // to place on each selected volume. Each segment must have a unique database // path for each volume. If you do not explicitly specify any database path @@ -7236,6 +7263,12 @@ func (s *CreateKxDataviewOutput) SetLastModifiedTimestamp(v time.Time) *CreateKx return s } +// SetReadWrite sets the ReadWrite field's value. +func (s *CreateKxDataviewOutput) SetReadWrite(v bool) *CreateKxDataviewOutput { + s.ReadWrite = &v + return s +} + // SetSegmentConfigurations sets the SegmentConfigurations field's value. func (s *CreateKxDataviewOutput) SetSegmentConfigurations(v []*KxDataviewSegmentConfiguration) *CreateKxDataviewOutput { s.SegmentConfigurations = v @@ -7442,7 +7475,7 @@ type CreateKxScalingGroupInput struct { // The identifier of the availability zones. // // AvailabilityZoneId is a required field - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string" required:"true"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string" required:"true"` // A token that ensures idempotency. This token expires in 10 minutes. ClientToken *string `locationName:"clientToken" min:"1" type:"string" idempotencyToken:"true"` @@ -7456,6 +7489,26 @@ type CreateKxScalingGroupInput struct { // The memory and CPU capabilities of the scaling group host on which FinSpace // Managed kdb clusters will be placed. 
// + // You can add one of the following values: + // + // * kx.sg.4xlarge – The host type with a configuration of 108 GiB memory + // and 16 vCPUs. + // + // * kx.sg.8xlarge – The host type with a configuration of 216 GiB memory + // and 32 vCPUs. + // + // * kx.sg.16xlarge – The host type with a configuration of 432 GiB memory + // and 64 vCPUs. + // + // * kx.sg.32xlarge – The host type with a configuration of 864 GiB memory + // and 128 vCPUs. + // + // * kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory + // and 64 vCPUs. + // + // * kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory + // and 96 vCPUs. + // // HostType is a required field HostType *string `locationName:"hostType" min:"1" type:"string" required:"true"` @@ -7493,6 +7546,9 @@ func (s *CreateKxScalingGroupInput) Validate() error { if s.AvailabilityZoneId == nil { invalidParams.Add(request.NewErrParamRequired("AvailabilityZoneId")) } + if s.AvailabilityZoneId != nil && len(*s.AvailabilityZoneId) < 8 { + invalidParams.Add(request.NewErrParamMinLen("AvailabilityZoneId", 8)) + } if s.ClientToken != nil && len(*s.ClientToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) } @@ -7564,7 +7620,7 @@ type CreateKxScalingGroupOutput struct { _ struct{} `type:"structure"` // The identifier of the availability zones. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` // The timestamp at which the scaling group was created in FinSpace. The value // is determined as epoch time in milliseconds. For example, the value for Monday, @@ -7843,8 +7899,9 @@ type CreateKxVolumeInput struct { // AvailabilityZoneIds is a required field AvailabilityZoneIds []*string `locationName:"availabilityZoneIds" type:"list" required:"true"` - // The number of availability zones you want to assign per cluster. 
Currently, - // FinSpace only support SINGLE for volumes. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. // // AzMode is a required field AzMode *string `locationName:"azMode" type:"string" required:"true" enum:"KxAzMode"` @@ -8004,8 +8061,9 @@ type CreateKxVolumeOutput struct { // The identifier of the availability zones. AvailabilityZoneIds []*string `locationName:"availabilityZoneIds" type:"list"` - // The number of availability zones you want to assign per cluster. Currently, - // FinSpace only support SINGLE for volumes. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. AzMode *string `locationName:"azMode" type:"string" enum:"KxAzMode"` // The timestamp at which the volume was created in FinSpace. The value is determined @@ -9619,7 +9677,7 @@ type GetKxClusterOutput struct { AutoScalingConfiguration *AutoScalingConfiguration `locationName:"autoScalingConfiguration" type:"structure"` // The availability zone identifiers for the requested regions. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` // The number of availability zones you want to assign per cluster. This can // be one of the following @@ -10310,14 +10368,11 @@ type GetKxDataviewOutput struct { AutoUpdate *bool `locationName:"autoUpdate" type:"boolean"` // The identifier of the availability zones. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` - // The number of availability zones you want to assign per cluster. This can - // be one of the following - // - // * SINGLE – Assigns one availability zone per cluster. 
- // - // * MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. AzMode *string `locationName:"azMode" type:"string" enum:"KxAzMode"` // A unique identifier of the changeset that you want to use to ingest data. @@ -10346,6 +10401,9 @@ type GetKxDataviewOutput struct { // 1, 2021 12:00:00 PM UTC is specified as 1635768000000. LastModifiedTimestamp *time.Time `locationName:"lastModifiedTimestamp" type:"timestamp"` + // Returns True if the dataview is created as writeable and False otherwise. + ReadWrite *bool `locationName:"readWrite" type:"boolean"` + // The configuration that contains the database path of the data that you want // to place on each selected volume. Each segment must have a unique database // path for each volume. If you do not explicitly specify any database path @@ -10450,6 +10508,12 @@ func (s *GetKxDataviewOutput) SetLastModifiedTimestamp(v time.Time) *GetKxDatavi return s } +// SetReadWrite sets the ReadWrite field's value. +func (s *GetKxDataviewOutput) SetReadWrite(v bool) *GetKxDataviewOutput { + s.ReadWrite = &v + return s +} + // SetSegmentConfigurations sets the SegmentConfigurations field's value. func (s *GetKxDataviewOutput) SetSegmentConfigurations(v []*KxDataviewSegmentConfiguration) *GetKxDataviewOutput { s.SegmentConfigurations = v @@ -10765,7 +10829,7 @@ type GetKxScalingGroupOutput struct { _ struct{} `type:"structure"` // The identifier of the availability zones. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` // The list of Managed kdb clusters that are currently active in the given scaling // group. 
@@ -10778,6 +10842,26 @@ type GetKxScalingGroupOutput struct { // The memory and CPU capabilities of the scaling group host on which FinSpace // Managed kdb clusters will be placed. + // + // It can have one of the following values: + // + // * kx.sg.4xlarge – The host type with a configuration of 108 GiB memory + // and 16 vCPUs. + // + // * kx.sg.8xlarge – The host type with a configuration of 216 GiB memory + // and 32 vCPUs. + // + // * kx.sg.16xlarge – The host type with a configuration of 432 GiB memory + // and 64 vCPUs. + // + // * kx.sg.32xlarge – The host type with a configuration of 864 GiB memory + // and 128 vCPUs. + // + // * kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory + // and 64 vCPUs. + // + // * kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory + // and 96 vCPUs. HostType *string `locationName:"hostType" min:"1" type:"string"` // The last time that the scaling group was updated in FinSpace. The value is @@ -11088,8 +11172,9 @@ type GetKxVolumeOutput struct { // The identifier of the availability zones. AvailabilityZoneIds []*string `locationName:"availabilityZoneIds" type:"list"` - // The number of availability zones you want to assign per cluster. Currently, - // FinSpace only support SINGLE for volumes. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. AzMode *string `locationName:"azMode" type:"string" enum:"KxAzMode"` // The timestamp at which the volume was created in FinSpace. The value is determined @@ -11662,7 +11747,7 @@ type KxCluster struct { _ struct{} `type:"structure"` // The availability zone identifiers for the requested regions. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` // The number of availability zones assigned per cluster. 
This can be one of // the following: @@ -12392,14 +12477,11 @@ type KxDataviewListEntry struct { AutoUpdate *bool `locationName:"autoUpdate" type:"boolean"` // The identifier of the availability zones. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` - // The number of availability zones you want to assign per cluster. This can - // be one of the following - // - // * SINGLE – Assigns one availability zone per cluster. - // - // * MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. AzMode *string `locationName:"azMode" type:"string" enum:"KxAzMode"` // A unique identifier for the changeset. @@ -12427,6 +12509,9 @@ type KxDataviewListEntry struct { // November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. LastModifiedTimestamp *time.Time `locationName:"lastModifiedTimestamp" type:"timestamp"` + // Returns True if the dataview is created as writeable and False otherwise. + ReadWrite *bool `locationName:"readWrite" type:"boolean"` + // The configuration that contains the database path of the data that you want // to place on each selected volume. Each segment must have a unique database // path for each volume. If you do not explicitly specify any database path @@ -12525,6 +12610,12 @@ func (s *KxDataviewListEntry) SetLastModifiedTimestamp(v time.Time) *KxDataviewL return s } +// SetReadWrite sets the ReadWrite field's value. +func (s *KxDataviewListEntry) SetReadWrite(v bool) *KxDataviewListEntry { + s.ReadWrite = &v + return s +} + // SetSegmentConfigurations sets the SegmentConfigurations field's value. 
func (s *KxDataviewListEntry) SetSegmentConfigurations(v []*KxDataviewSegmentConfiguration) *KxDataviewListEntry { s.SegmentConfigurations = v @@ -12557,6 +12648,12 @@ type KxDataviewSegmentConfiguration struct { // DbPaths is a required field DbPaths []*string `locationName:"dbPaths" min:"1" type:"list" required:"true"` + // Enables on-demand caching on the selected database path when a particular + // file or a column of the database is accessed. When on demand caching is True, + // dataviews perform minimal loading of files on the filesystem as needed. When + // it is set to False, everything is cached. The default value is False. + OnDemand *bool `locationName:"onDemand" type:"boolean"` + // The name of the volume where you want to add data. // // VolumeName is a required field @@ -12609,6 +12706,12 @@ func (s *KxDataviewSegmentConfiguration) SetDbPaths(v []*string) *KxDataviewSegm return s } +// SetOnDemand sets the OnDemand field's value. +func (s *KxDataviewSegmentConfiguration) SetOnDemand(v bool) *KxDataviewSegmentConfiguration { + s.OnDemand = &v + return s +} + // SetVolumeName sets the VolumeName field's value. func (s *KxDataviewSegmentConfiguration) SetVolumeName(v string) *KxDataviewSegmentConfiguration { s.VolumeName = &v @@ -12936,7 +13039,7 @@ type KxNode struct { // The identifier of the availability zones where subnets for the environment // are created. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` // The time when a particular node is started. The value is determined as epoch // time in milliseconds. For example, the value for Monday, November 1, 2021 @@ -13060,7 +13163,7 @@ type KxScalingGroup struct { _ struct{} `type:"structure"` // The identifier of the availability zones. 
- AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` // The list of clusters currently active in a given scaling group. Clusters []*string `locationName:"clusters" type:"list"` @@ -13072,6 +13175,26 @@ type KxScalingGroup struct { // The memory and CPU capabilities of the scaling group host on which FinSpace // Managed kdb clusters will be placed. + // + // You can add one of the following values: + // + // * kx.sg.4xlarge – The host type with a configuration of 108 GiB memory + // and 16 vCPUs. + // + // * kx.sg.8xlarge – The host type with a configuration of 216 GiB memory + // and 32 vCPUs. + // + // * kx.sg.16xlarge – The host type with a configuration of 432 GiB memory + // and 64 vCPUs. + // + // * kx.sg.32xlarge – The host type with a configuration of 864 GiB memory + // and 128 vCPUs. + // + // * kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory + // and 64 vCPUs. + // + // * kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory + // and 96 vCPUs. HostType *string `locationName:"hostType" min:"1" type:"string"` // The last time that the scaling group was updated in FinSpace. The value is @@ -13344,8 +13467,9 @@ type KxVolume struct { // The identifier of the availability zones. AvailabilityZoneIds []*string `locationName:"availabilityZoneIds" type:"list"` - // The number of availability zones assigned to the volume. Currently, only - // SINGLE is supported. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. AzMode *string `locationName:"azMode" type:"string" enum:"KxAzMode"` // The timestamp at which the volume was created in FinSpace. 
The value is determined @@ -16375,14 +16499,11 @@ type UpdateKxDataviewOutput struct { AutoUpdate *bool `locationName:"autoUpdate" type:"boolean"` // The identifier of the availability zones. - AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` + AvailabilityZoneId *string `locationName:"availabilityZoneId" min:"8" type:"string"` - // The number of availability zones you want to assign per cluster. This can - // be one of the following - // - // * SINGLE – Assigns one availability zone per cluster. - // - // * MULTI – Assigns all the availability zones per cluster. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. AzMode *string `locationName:"azMode" type:"string" enum:"KxAzMode"` // A unique identifier for the changeset. @@ -16411,6 +16532,9 @@ type UpdateKxDataviewOutput struct { // 1, 2021 12:00:00 PM UTC is specified as 1635768000000. LastModifiedTimestamp *time.Time `locationName:"lastModifiedTimestamp" type:"timestamp"` + // Returns True if the dataview is created as writeable and False otherwise. + ReadWrite *bool `locationName:"readWrite" type:"boolean"` + // The configuration that contains the database path of the data that you want // to place on each selected volume. Each segment must have a unique database // path for each volume. If you do not explicitly specify any database path @@ -16512,6 +16636,12 @@ func (s *UpdateKxDataviewOutput) SetLastModifiedTimestamp(v time.Time) *UpdateKx return s } +// SetReadWrite sets the ReadWrite field's value. +func (s *UpdateKxDataviewOutput) SetReadWrite(v bool) *UpdateKxDataviewOutput { + s.ReadWrite = &v + return s +} + // SetSegmentConfigurations sets the SegmentConfigurations field's value. 
func (s *UpdateKxDataviewOutput) SetSegmentConfigurations(v []*KxDataviewSegmentConfiguration) *UpdateKxDataviewOutput { s.SegmentConfigurations = v @@ -17313,8 +17443,9 @@ type UpdateKxVolumeOutput struct { // The identifier of the availability zones. AvailabilityZoneIds []*string `locationName:"availabilityZoneIds" type:"list"` - // The number of availability zones you want to assign per cluster. Currently, - // FinSpace only support SINGLE for volumes. + // The number of availability zones you want to assign per volume. Currently, + // FinSpace only supports SINGLE for volumes. This places dataview in a single + // AZ. AzMode *string `locationName:"azMode" type:"string" enum:"KxAzMode"` // The timestamp at which the volume was created in FinSpace. The value is determined diff --git a/service/managedblockchainquery/api.go b/service/managedblockchainquery/api.go index 2c134716760..f238bd594b5 100644 --- a/service/managedblockchainquery/api.go +++ b/service/managedblockchainquery/api.go @@ -570,6 +570,159 @@ func (c *ManagedBlockchainQuery) ListAssetContractsPagesWithContext(ctx aws.Cont return p.Err() } +const opListFilteredTransactionEvents = "ListFilteredTransactionEvents" + +// ListFilteredTransactionEventsRequest generates a "aws/request.Request" representing the +// client's request for the ListFilteredTransactionEvents operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListFilteredTransactionEvents for more information on using the ListFilteredTransactionEvents +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the ListFilteredTransactionEventsRequest method. +// req, resp := client.ListFilteredTransactionEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/ListFilteredTransactionEvents +func (c *ManagedBlockchainQuery) ListFilteredTransactionEventsRequest(input *ListFilteredTransactionEventsInput) (req *request.Request, output *ListFilteredTransactionEventsOutput) { + op := &request.Operation{ + Name: opListFilteredTransactionEvents, + HTTPMethod: "POST", + HTTPPath: "/list-filtered-transaction-events", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListFilteredTransactionEventsInput{} + } + + output = &ListFilteredTransactionEventsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListFilteredTransactionEvents API operation for Amazon Managed Blockchain Query. +// +// Lists all the transaction events for an address on the blockchain. +// +// This operation is only supported on the Bitcoin networks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Managed Blockchain Query's +// API operation ListFilteredTransactionEvents for usage and error information. +// +// Returned Error Types: +// +// - ThrottlingException +// The request or operation couldn't be performed because a service is throttling +// requests. The most common source of throttling errors is when you create +// resources that exceed your service limit for this resource type. Request +// a limit increase or delete unused resources, if possible. 
+// +// - ValidationException +// The resource passed is invalid. +// +// - AccessDeniedException +// The Amazon Web Services account doesn’t have access to this resource. +// +// - InternalServerException +// The request processing has failed because of an internal error in the service. +// +// - ServiceQuotaExceededException +// The service quota has been exceeded for this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-query-2023-05-04/ListFilteredTransactionEvents +func (c *ManagedBlockchainQuery) ListFilteredTransactionEvents(input *ListFilteredTransactionEventsInput) (*ListFilteredTransactionEventsOutput, error) { + req, out := c.ListFilteredTransactionEventsRequest(input) + return out, req.Send() +} + +// ListFilteredTransactionEventsWithContext is the same as ListFilteredTransactionEvents with the addition of +// the ability to pass a context and additional request options. +// +// See ListFilteredTransactionEvents for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchainQuery) ListFilteredTransactionEventsWithContext(ctx aws.Context, input *ListFilteredTransactionEventsInput, opts ...request.Option) (*ListFilteredTransactionEventsOutput, error) { + req, out := c.ListFilteredTransactionEventsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListFilteredTransactionEventsPages iterates over the pages of a ListFilteredTransactionEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListFilteredTransactionEvents method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListFilteredTransactionEvents operation. +// pageNum := 0 +// err := client.ListFilteredTransactionEventsPages(params, +// func(page *managedblockchainquery.ListFilteredTransactionEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *ManagedBlockchainQuery) ListFilteredTransactionEventsPages(input *ListFilteredTransactionEventsInput, fn func(*ListFilteredTransactionEventsOutput, bool) bool) error { + return c.ListFilteredTransactionEventsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListFilteredTransactionEventsPagesWithContext same as ListFilteredTransactionEventsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchainQuery) ListFilteredTransactionEventsPagesWithContext(ctx aws.Context, input *ListFilteredTransactionEventsInput, fn func(*ListFilteredTransactionEventsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListFilteredTransactionEventsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListFilteredTransactionEventsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListFilteredTransactionEventsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTokenBalances = "ListTokenBalances" // ListTokenBalancesRequest generates a "aws/request.Request" representing the @@ -780,8 +933,7 @@ func (c *ManagedBlockchainQuery) ListTransactionEventsRequest(input *ListTransac // ListTransactionEvents API operation for Amazon Managed Blockchain Query. // -// An array of TransactionEvent objects. Each object contains details about -// the transaction event. +// # Lists all the transaction events for a transaction // // This action will return transaction details for all transactions that are // confirmed on the blockchain, even if they have not reached finality (https://docs.aws.amazon.com/managed-blockchain/latest/ambq-dg/key-concepts.html#finality). @@ -935,8 +1087,7 @@ func (c *ManagedBlockchainQuery) ListTransactionsRequest(input *ListTransactions // ListTransactions API operation for Amazon Managed Blockchain Query. // -// Lists all of the transactions on a given wallet address or to a specific -// contract. +// Lists all the transaction events for a transaction. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1103,6 +1254,56 @@ func (s *AccessDeniedException) RequestID() string { return s.RespMetadata.RequestID } +// This is the container for the unique public address on the blockchain. +type AddressIdentifierFilter struct { + _ struct{} `type:"structure"` + + // The container for the recipient address of the transaction. + // + // TransactionEventToAddress is a required field + TransactionEventToAddress []*string `locationName:"transactionEventToAddress" min:"1" type:"list" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AddressIdentifierFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AddressIdentifierFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddressIdentifierFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddressIdentifierFilter"} + if s.TransactionEventToAddress == nil { + invalidParams.Add(request.NewErrParamRequired("TransactionEventToAddress")) + } + if s.TransactionEventToAddress != nil && len(s.TransactionEventToAddress) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactionEventToAddress", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTransactionEventToAddress sets the TransactionEventToAddress field's value. +func (s *AddressIdentifierFilter) SetTransactionEventToAddress(v []*string) *AddressIdentifierFilter { + s.TransactionEventToAddress = v + return s +} + // This container contains information about an contract. type AssetContract struct { _ struct{} `type:"structure"` @@ -1182,7 +1383,7 @@ type BatchGetTokenBalanceErrorItem struct { // ErrorType is a required field ErrorType *string `locationName:"errorType" type:"string" required:"true" enum:"ErrorType"` - // The container for the identifier of the owner. + // The container for the owner identifier. 
OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure"` // The container for the identifier for the token including the unique token @@ -1309,7 +1510,7 @@ type BatchGetTokenBalanceInputItem struct { // The container for time. AtBlockchainInstant *BlockchainInstant `locationName:"atBlockchainInstant" type:"structure"` - // The container for the identifier of the owner. + // The container for the owner identifier. // // OwnerIdentifier is a required field OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure" required:"true"` @@ -1447,7 +1648,7 @@ type BatchGetTokenBalanceOutputItem struct { // The container for time. LastUpdatedTime *BlockchainInstant `locationName:"lastUpdatedTime" type:"structure"` - // The container for the identifier of the owner. + // The container for the owner identifier. OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure"` // The container for the identifier for the token including the unique token @@ -1994,7 +2195,7 @@ type GetTokenBalanceOutput struct { // The container for time. LastUpdatedTime *BlockchainInstant `locationName:"lastUpdatedTime" type:"structure"` - // The container for the identifier of the owner. + // The container for the owner identifier. OwnerIdentifier *OwnerIdentifier `locationName:"ownerIdentifier" type:"structure"` // The container for the identifier for the token including the unique token @@ -2061,8 +2262,7 @@ type GetTransactionInput struct { // Network is a required field Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"` - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. 
// // TransactionHash is a required field TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"` @@ -2155,7 +2355,7 @@ type InternalServerException struct { // The container for the exception message. Message_ *string `locationName:"message" min:"1" type:"string"` - // The container of the retryAfterSeconds value. + // Specifies the retryAfterSeconds value. RetryAfterSeconds *int64 `location:"header" locationName:"Retry-After" type:"integer"` } @@ -2225,7 +2425,7 @@ type ListAssetContractsInput struct { // The maximum number of contracts to list. // - // Default:100 + // Default: 100 // // Even if additional results can be retrieved, the request can return less // results than maxResults or an empty array of results. @@ -2338,12 +2538,247 @@ func (s *ListAssetContractsOutput) SetNextToken(v string) *ListAssetContractsOut return s } +type ListFilteredTransactionEventsInput struct { + _ struct{} `type:"structure"` + + // This is the unique public address on the blockchain for which the transaction + // events are being requested. + // + // AddressIdentifierFilter is a required field + AddressIdentifierFilter *AddressIdentifierFilter `locationName:"addressIdentifierFilter" type:"structure" required:"true"` + + // The container for the ConfirmationStatusFilter that filters for the finality + // (https://docs.aws.amazon.com/managed-blockchain/latest/ambq-dg/key-concepts.html#finality) + // of the results. + ConfirmationStatusFilter *ConfirmationStatusFilter `locationName:"confirmationStatusFilter" type:"structure"` + + // The maximum number of transaction events to list. + // + // Default: 100 + // + // Even if additional results can be retrieved, the request can return less + // results than maxResults or an empty array of results. + // + // To retrieve the next set of results, make another request with the returned + // nextToken value. 
The value of nextToken is null when there are no more results + // to return + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The blockchain network where the transaction occurred. + // + // Valid Values: BITCOIN_MAINNET | BITCOIN_TESTNET + // + // Network is a required field + Network *string `locationName:"network" type:"string" required:"true"` + + // The pagination token that indicates the next set of results to retrieve. + NextToken *string `locationName:"nextToken" type:"string"` + + // The order by which the results will be sorted. + Sort *ListFilteredTransactionEventsSort `locationName:"sort" type:"structure"` + + // This container specifies the time frame for the transaction events returned + // in the response. + TimeFilter *TimeFilter `locationName:"timeFilter" type:"structure"` + + // This container specifies filtering attributes related to BITCOIN_VOUT event + // types + VoutFilter *VoutFilter `locationName:"voutFilter" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListFilteredTransactionEventsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListFilteredTransactionEventsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListFilteredTransactionEventsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFilteredTransactionEventsInput"} + if s.AddressIdentifierFilter == nil { + invalidParams.Add(request.NewErrParamRequired("AddressIdentifierFilter")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Network == nil { + invalidParams.Add(request.NewErrParamRequired("Network")) + } + if s.AddressIdentifierFilter != nil { + if err := s.AddressIdentifierFilter.Validate(); err != nil { + invalidParams.AddNested("AddressIdentifierFilter", err.(request.ErrInvalidParams)) + } + } + if s.ConfirmationStatusFilter != nil { + if err := s.ConfirmationStatusFilter.Validate(); err != nil { + invalidParams.AddNested("ConfirmationStatusFilter", err.(request.ErrInvalidParams)) + } + } + if s.VoutFilter != nil { + if err := s.VoutFilter.Validate(); err != nil { + invalidParams.AddNested("VoutFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressIdentifierFilter sets the AddressIdentifierFilter field's value. +func (s *ListFilteredTransactionEventsInput) SetAddressIdentifierFilter(v *AddressIdentifierFilter) *ListFilteredTransactionEventsInput { + s.AddressIdentifierFilter = v + return s +} + +// SetConfirmationStatusFilter sets the ConfirmationStatusFilter field's value. +func (s *ListFilteredTransactionEventsInput) SetConfirmationStatusFilter(v *ConfirmationStatusFilter) *ListFilteredTransactionEventsInput { + s.ConfirmationStatusFilter = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListFilteredTransactionEventsInput) SetMaxResults(v int64) *ListFilteredTransactionEventsInput { + s.MaxResults = &v + return s +} + +// SetNetwork sets the Network field's value. 
+func (s *ListFilteredTransactionEventsInput) SetNetwork(v string) *ListFilteredTransactionEventsInput { + s.Network = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListFilteredTransactionEventsInput) SetNextToken(v string) *ListFilteredTransactionEventsInput { + s.NextToken = &v + return s +} + +// SetSort sets the Sort field's value. +func (s *ListFilteredTransactionEventsInput) SetSort(v *ListFilteredTransactionEventsSort) *ListFilteredTransactionEventsInput { + s.Sort = v + return s +} + +// SetTimeFilter sets the TimeFilter field's value. +func (s *ListFilteredTransactionEventsInput) SetTimeFilter(v *TimeFilter) *ListFilteredTransactionEventsInput { + s.TimeFilter = v + return s +} + +// SetVoutFilter sets the VoutFilter field's value. +func (s *ListFilteredTransactionEventsInput) SetVoutFilter(v *VoutFilter) *ListFilteredTransactionEventsInput { + s.VoutFilter = v + return s +} + +type ListFilteredTransactionEventsOutput struct { + _ struct{} `type:"structure"` + + // The transaction events returned by the request. + // + // Events is a required field + Events []*TransactionEvent `locationName:"events" type:"list" required:"true"` + + // The pagination token that indicates the next set of results to retrieve. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListFilteredTransactionEventsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ListFilteredTransactionEventsOutput) GoString() string { + return s.String() +} + +// SetEvents sets the Events field's value. +func (s *ListFilteredTransactionEventsOutput) SetEvents(v []*TransactionEvent) *ListFilteredTransactionEventsOutput { + s.Events = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListFilteredTransactionEventsOutput) SetNextToken(v string) *ListFilteredTransactionEventsOutput { + s.NextToken = &v + return s +} + +// Lists all the transaction events for an address on the blockchain. +// +// This operation is only supported on the Bitcoin blockchain networks. +type ListFilteredTransactionEventsSort struct { + _ struct{} `type:"structure"` + + // Container on how the results will be sorted by? + SortBy *string `locationName:"sortBy" type:"string" enum:"ListFilteredTransactionEventsSortBy"` + + // The container for the sort order for ListFilteredTransactionEvents. The SortOrder + // field only accepts the values ASCENDING and DESCENDING. Not providing SortOrder + // will default to ASCENDING. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrder"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListFilteredTransactionEventsSort) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListFilteredTransactionEventsSort) GoString() string { + return s.String() +} + +// SetSortBy sets the SortBy field's value. 
+func (s *ListFilteredTransactionEventsSort) SetSortBy(v string) *ListFilteredTransactionEventsSort { + s.SortBy = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *ListFilteredTransactionEventsSort) SetSortOrder(v string) *ListFilteredTransactionEventsSort { + s.SortOrder = &v + return s +} + type ListTokenBalancesInput struct { _ struct{} `type:"structure"` // The maximum number of token balances to return. // - // Default:100 + // Default: 100 // // Even if additional results can be retrieved, the request can return less // results than maxResults or an empty array of results. @@ -2488,7 +2923,7 @@ type ListTransactionEventsInput struct { // The maximum number of transaction events to list. // - // Default:100 + // Default: 100 // // Even if additional results can be retrieved, the request can return less // results than maxResults or an empty array of results. @@ -2506,11 +2941,14 @@ type ListTransactionEventsInput struct { // The pagination token that indicates the next set of results to retrieve. NextToken *string `locationName:"nextToken" type:"string"` - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. + TransactionHash *string `locationName:"transactionHash" type:"string"` + + // The identifier of a Bitcoin transaction. It is generated when a transaction + // is created. // - // TransactionHash is a required field - TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"` + // transactionId is only supported on the Bitcoin networks. + TransactionId *string `locationName:"transactionId" type:"string"` } // String returns the string representation. 
@@ -2540,9 +2978,6 @@ func (s *ListTransactionEventsInput) Validate() error { if s.Network == nil { invalidParams.Add(request.NewErrParamRequired("Network")) } - if s.TransactionHash == nil { - invalidParams.Add(request.NewErrParamRequired("TransactionHash")) - } if invalidParams.Len() > 0 { return invalidParams @@ -2574,6 +3009,12 @@ func (s *ListTransactionEventsInput) SetTransactionHash(v string) *ListTransacti return s } +// SetTransactionId sets the TransactionId field's value. +func (s *ListTransactionEventsInput) SetTransactionId(v string) *ListTransactionEventsInput { + s.TransactionId = &v + return s +} + type ListTransactionEventsOutput struct { _ struct{} `type:"structure"` @@ -2627,7 +3068,7 @@ type ListTransactionsInput struct { // This filter is used to include transactions in the response that haven't // reached finality (https://docs.aws.amazon.com/managed-blockchain/latest/ambq-dg/key-concepts.html#finality). - // Transactions that have reached finiality are always part of the response. + // Transactions that have reached finality are always part of the response. ConfirmationStatusFilter *ConfirmationStatusFilter `locationName:"confirmationStatusFilter" type:"structure"` // The container for time. @@ -2635,7 +3076,7 @@ type ListTransactionsInput struct { // The maximum number of transactions to list. // - // Default:100 + // Default: 100 // // Even if additional results can be retrieved, the request can return less // results than maxResults or an empty array of results. @@ -2653,8 +3094,7 @@ type ListTransactionsInput struct { // The pagination token that indicates the next set of results to retrieve. NextToken *string `locationName:"nextToken" type:"string"` - // The order by which the results will be sorted. If ASCENNDING is selected, - // the results will be ordered by fromTime. + // The order by which the results will be sorted. Sort *ListTransactionsSort `locationName:"sort" type:"structure"` // The container for time. 
@@ -2883,7 +3323,7 @@ func (s *OwnerFilter) SetAddress(v string) *OwnerFilter { return s } -// The container for the identifier of the owner. +// The container for the owner identifier. type OwnerIdentifier struct { _ struct{} `type:"structure"` @@ -3171,6 +3611,47 @@ func (s *ThrottlingException) RequestID() string { return s.RespMetadata.RequestID } +// This container is used to specify a time frame. +type TimeFilter struct { + _ struct{} `type:"structure"` + + // The container for time. + From *BlockchainInstant `locationName:"from" type:"structure"` + + // The container for time. + To *BlockchainInstant `locationName:"to" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TimeFilter) GoString() string { + return s.String() +} + +// SetFrom sets the From field's value. +func (s *TimeFilter) SetFrom(v *BlockchainInstant) *TimeFilter { + s.From = v + return s +} + +// SetTo sets the To field's value. +func (s *TimeFilter) SetTo(v *BlockchainInstant) *TimeFilter { + s.To = v + return s +} + // The balance of the token. type TokenBalance struct { _ struct{} `type:"structure"` @@ -3460,14 +3941,13 @@ type Transaction struct { // The transaction fee. TransactionFee *string `locationName:"transactionFee" type:"string"` - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. 
// // TransactionHash is a required field TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"` - // The unique identifier of the transaction. It is generated whenever a transaction - // is verified and added to the blockchain. + // The identifier of a Bitcoin transaction. It is generated when a transaction + // is created. TransactionId *string `locationName:"transactionId" type:"string"` // The index of the transaction within a blockchain. @@ -3623,7 +4103,13 @@ func (s *Transaction) SetTransactionTimestamp(v time.Time) *Transaction { type TransactionEvent struct { _ struct{} `type:"structure"` - // The blockchain address. for the contract + // The container for time. + BlockchainInstant *BlockchainInstant `locationName:"blockchainInstant" type:"structure"` + + // This container specifies whether the transaction has reached Finality. + ConfirmationStatus *string `locationName:"confirmationStatus" type:"string" enum:"ConfirmationStatus"` + + // The blockchain address for the contract ContractAddress *string `locationName:"contractAddress" type:"string"` // The type of transaction event. @@ -3640,6 +4126,22 @@ type TransactionEvent struct { // Network is a required field Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"` + // The position of the spent transaction output in the output list of the creating + // transaction. + // + // This is only returned for BITCOIN_VIN event types. + SpentVoutIndex *int64 `locationName:"spentVoutIndex" type:"integer"` + + // The transactionHash that created the spent transaction output. + // + // This is only returned for BITCOIN_VIN event types. + SpentVoutTransactionHash *string `locationName:"spentVoutTransactionHash" type:"string"` + + // The transactionId that created the spent transaction output. + // + // This is only returned for BITCOIN_VIN event types. 
+ SpentVoutTransactionId *string `locationName:"spentVoutTransactionId" type:"string"` + // The wallet address receiving the transaction. It can either be a public key // or a contract. To *string `locationName:"to" type:"string"` @@ -3647,21 +4149,26 @@ type TransactionEvent struct { // The unique identifier for the token involved in the transaction. TokenId *string `locationName:"tokenId" type:"string"` - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. // // TransactionHash is a required field TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"` - // The unique identifier of the transaction. It is generated whenever a transaction - // is verified and added to the blockchain. + // The identifier of a Bitcoin transaction. It is generated when a transaction + // is created. TransactionId *string `locationName:"transactionId" type:"string"` // The value that was transacted. Value *string `locationName:"value" type:"string"` - // The position of the vout in the transaction output list. + // The position of the transaction output in the transaction output list. VoutIndex *int64 `locationName:"voutIndex" type:"integer"` + + // Specifies if the transaction output is spent or unspent. This is only returned + // for BITCOIN_VOUT event types. + // + // This is only returned for BITCOIN_VOUT event types. + VoutSpent *bool `locationName:"voutSpent" type:"boolean"` } // String returns the string representation. @@ -3682,6 +4189,18 @@ func (s TransactionEvent) GoString() string { return s.String() } +// SetBlockchainInstant sets the BlockchainInstant field's value. +func (s *TransactionEvent) SetBlockchainInstant(v *BlockchainInstant) *TransactionEvent { + s.BlockchainInstant = v + return s +} + +// SetConfirmationStatus sets the ConfirmationStatus field's value. 
+func (s *TransactionEvent) SetConfirmationStatus(v string) *TransactionEvent { + s.ConfirmationStatus = &v + return s +} + // SetContractAddress sets the ContractAddress field's value. func (s *TransactionEvent) SetContractAddress(v string) *TransactionEvent { s.ContractAddress = &v @@ -3706,6 +4225,24 @@ func (s *TransactionEvent) SetNetwork(v string) *TransactionEvent { return s } +// SetSpentVoutIndex sets the SpentVoutIndex field's value. +func (s *TransactionEvent) SetSpentVoutIndex(v int64) *TransactionEvent { + s.SpentVoutIndex = &v + return s +} + +// SetSpentVoutTransactionHash sets the SpentVoutTransactionHash field's value. +func (s *TransactionEvent) SetSpentVoutTransactionHash(v string) *TransactionEvent { + s.SpentVoutTransactionHash = &v + return s +} + +// SetSpentVoutTransactionId sets the SpentVoutTransactionId field's value. +func (s *TransactionEvent) SetSpentVoutTransactionId(v string) *TransactionEvent { + s.SpentVoutTransactionId = &v + return s +} + // SetTo sets the To field's value. func (s *TransactionEvent) SetTo(v string) *TransactionEvent { s.To = &v @@ -3742,6 +4279,12 @@ func (s *TransactionEvent) SetVoutIndex(v int64) *TransactionEvent { return s } +// SetVoutSpent sets the VoutSpent field's value. +func (s *TransactionEvent) SetVoutSpent(v bool) *TransactionEvent { + s.VoutSpent = &v + return s +} + // The container of the transaction output. type TransactionOutputItem struct { _ struct{} `type:"structure"` @@ -3754,8 +4297,7 @@ type TransactionOutputItem struct { // Network is a required field Network *string `locationName:"network" type:"string" required:"true" enum:"QueryNetwork"` - // The hash of the transaction. It is generated whenever a transaction is verified - // and added to the blockchain. + // The hash of a transaction. It is generated when a transaction is created. 
// // TransactionHash is a required field TransactionHash *string `locationName:"transactionHash" type:"string" required:"true"` @@ -3926,6 +4468,54 @@ func (s *ValidationExceptionField) SetName(v string) *ValidationExceptionField { return s } +// This container specifies filtering attributes related to BITCOIN_VOUT event +// types +type VoutFilter struct { + _ struct{} `type:"structure"` + + // Specifies if the transaction output is spent or unspent. + // + // VoutSpent is a required field + VoutSpent *bool `locationName:"voutSpent" type:"boolean" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VoutFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VoutFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VoutFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VoutFilter"} + if s.VoutSpent == nil { + invalidParams.Add(request.NewErrParamRequired("VoutSpent")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetVoutSpent sets the VoutSpent field's value. 
+func (s *VoutFilter) SetVoutSpent(v bool) *VoutFilter { + s.VoutSpent = &v + return s +} + const ( // ConfirmationStatusFinal is a ConfirmationStatus enum value ConfirmationStatusFinal = "FINAL" @@ -3974,6 +4564,18 @@ func ExecutionStatus_Values() []string { } } +const ( + // ListFilteredTransactionEventsSortByBlockchainInstant is a ListFilteredTransactionEventsSortBy enum value + ListFilteredTransactionEventsSortByBlockchainInstant = "blockchainInstant" +) + +// ListFilteredTransactionEventsSortBy_Values returns all elements of the ListFilteredTransactionEventsSortBy enum +func ListFilteredTransactionEventsSortBy_Values() []string { + return []string{ + ListFilteredTransactionEventsSortByBlockchainInstant, + } +} + const ( // ListTransactionsSortByTransactionTimestamp is a ListTransactionsSortBy enum value ListTransactionsSortByTransactionTimestamp = "TRANSACTION_TIMESTAMP" diff --git a/service/managedblockchainquery/managedblockchainqueryiface/interface.go b/service/managedblockchainquery/managedblockchainqueryiface/interface.go index 5f5f15f0d56..a562ebd986b 100644 --- a/service/managedblockchainquery/managedblockchainqueryiface/interface.go +++ b/service/managedblockchainquery/managedblockchainqueryiface/interface.go @@ -83,6 +83,13 @@ type ManagedBlockchainQueryAPI interface { ListAssetContractsPages(*managedblockchainquery.ListAssetContractsInput, func(*managedblockchainquery.ListAssetContractsOutput, bool) bool) error ListAssetContractsPagesWithContext(aws.Context, *managedblockchainquery.ListAssetContractsInput, func(*managedblockchainquery.ListAssetContractsOutput, bool) bool, ...request.Option) error + ListFilteredTransactionEvents(*managedblockchainquery.ListFilteredTransactionEventsInput) (*managedblockchainquery.ListFilteredTransactionEventsOutput, error) + ListFilteredTransactionEventsWithContext(aws.Context, *managedblockchainquery.ListFilteredTransactionEventsInput, ...request.Option) (*managedblockchainquery.ListFilteredTransactionEventsOutput, 
error) + ListFilteredTransactionEventsRequest(*managedblockchainquery.ListFilteredTransactionEventsInput) (*request.Request, *managedblockchainquery.ListFilteredTransactionEventsOutput) + + ListFilteredTransactionEventsPages(*managedblockchainquery.ListFilteredTransactionEventsInput, func(*managedblockchainquery.ListFilteredTransactionEventsOutput, bool) bool) error + ListFilteredTransactionEventsPagesWithContext(aws.Context, *managedblockchainquery.ListFilteredTransactionEventsInput, func(*managedblockchainquery.ListFilteredTransactionEventsOutput, bool) bool, ...request.Option) error + ListTokenBalances(*managedblockchainquery.ListTokenBalancesInput) (*managedblockchainquery.ListTokenBalancesOutput, error) ListTokenBalancesWithContext(aws.Context, *managedblockchainquery.ListTokenBalancesInput, ...request.Option) (*managedblockchainquery.ListTokenBalancesOutput, error) ListTokenBalancesRequest(*managedblockchainquery.ListTokenBalancesInput) (*request.Request, *managedblockchainquery.ListTokenBalancesOutput)