diff --git a/CHANGELOG.md b/CHANGELOG.md index 22c6bdf5458..342bb9f7ba0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +Release v1.42.24 (2021-12-20) +=== + +### Service Client Updates +* `service/apigateway`: Updates service documentation + * Documentation updates for Amazon API Gateway +* `service/customer-profiles`: Updates service API and documentation +* `service/datasync`: Updates service API and documentation +* `service/devops-guru`: Updates service API, documentation, and paginators +* `service/finspace-data`: Updates service API and documentation +* `service/forecast`: Updates service API and documentation +* `service/imagebuilder`: Updates service API and documentation +* `service/location`: Updates service API and documentation +* `service/redshift`: Updates service API and documentation + * This release adds API support for managed Redshift datashares. Customers can now interact with a Redshift datashare that is managed by a different service, such as AWS Data Exchange. +* `service/sagemaker`: Updates service API and documentation + * This release adds a new ContentType field in AutoMLChannel for SageMaker CreateAutoMLJob InputDataConfig. +* `service/securityhub`: Updates service API and documentation + Release v1.42.23 (2021-12-13) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index e29642dadca..1b5b43bcc18 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -2959,6 +2959,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9723,6 +9726,9 @@ var awsPartition = partition{ endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -12199,6 +12205,37 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "lookoutmetrics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "lookoutvision": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -12829,6 +12866,52 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: 
"meetings-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "messaging-chime": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -16309,6 +16392,15 @@ var awsPartition = partition{ Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", + }, endpointKey{ Region: "aws-global", }: endpoint{ diff --git a/aws/version.go b/aws/version.go index b6855f09dd3..43b408e7ec9 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.42.23" +const SDKVersion = "1.42.24" diff --git a/models/apis/apigateway/2015-07-09/docs-2.json b/models/apis/apigateway/2015-07-09/docs-2.json index ae4ed5472e4..5a628b0c517 100644 --- a/models/apis/apigateway/2015-07-09/docs-2.json +++ b/models/apis/apigateway/2015-07-09/docs-2.json @@ -570,7 +570,7 @@ "CanarySettings$percentTraffic": "
The percent (0-100) of traffic diverted to a canary deployment.
", "DeploymentCanarySettings$percentTraffic": "The percentage (0.0-100.0) of traffic routed to the canary deployment.
", "MethodSetting$throttlingRateLimit": "Specifies the throttling rate limit. The PATCH path for this setting is /{method_setting_key}/throttling/rateLimit
, and the value is a double.
The API request steady-state rate limit.
" + "ThrottleSettings$rateLimit": "The API target request rate limit.
" } }, "EndpointConfiguration": { @@ -880,11 +880,11 @@ "Integration$timeoutInMillis": "Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds.
", "MethodSetting$throttlingBurstLimit": "Specifies the throttling burst limit. The PATCH path for this setting is /{method_setting_key}/throttling/burstLimit
, and the value is an integer.
Specifies the time to live (TTL), in seconds, for cached responses. The higher the TTL, the longer the response will be cached. The PATCH path for this setting is /{method_setting_key}/caching/ttlInSeconds
, and the value is an integer.
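The PATCH paths described above are applied to a stage with UpdateStage. A minimal aws-sdk-go sketch, where the REST API ID, stage name, and limit values are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func main() {
	svc := apigateway.New(session.Must(session.NewSession()))

	// Apply method-level throttling to every method in the stage ("*/*")
	// using the PATCH paths documented above.
	out, err := svc.UpdateStage(&apigateway.UpdateStageInput{
		RestApiId: aws.String("abc123"), // hypothetical REST API ID
		StageName: aws.String("prod"),
		PatchOperations: []*apigateway.PatchOperation{
			{
				Op:    aws.String("replace"),
				Path:  aws.String("/*/*/throttling/rateLimit"), // value is a double
				Value: aws.String("100.0"),
			},
			{
				Op:    aws.String("replace"),
				Path:  aws.String("/*/*/throttling/burstLimit"), // value is an integer
				Value: aws.String("200"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```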
The maximum number of requests that can be made in a given time period.
", + "QuotaSettings$limit": "The target maximum number of requests that can be made in a given time period.
", "QuotaSettings$offset": "The day that a time period starts. For example, with a time period of WEEK
, an offset of 0
starts on Sunday, and an offset of 1
starts on Monday.
The HTTP status code that the client would have received. Value is 0 if the authorizer succeeded.
", "TestInvokeMethodResponse$status": "The HTTP status code.
", - "ThrottleSettings$burstLimit": "The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
" + "ThrottleSettings$burstLimit": "The API target request burst rate limit. This allows more requests through for a period of time than the target rate limit.
" } }, "Integration": { @@ -1408,7 +1408,7 @@ "base": "Quotas configured for a usage plan.
", "refs": { "CreateUsagePlanRequest$quota": "The quota of the usage plan.
", - "UsagePlan$quota": "The maximum number of permitted requests per a given unit time interval.
" + "UsagePlan$quota": "The target maximum number of permitted requests per a given unit time interval.
" } }, "RequestValidator": { @@ -1994,7 +1994,7 @@ "Account$throttleSettings": "Specifies the API request limits configured for the current Account.
", "CreateUsagePlanRequest$throttle": "The throttling limits of the usage plan.
", "MapOfApiStageThrottleSettings$value": null, - "UsagePlan$throttle": "The request throttle limits of a usage plan.
" + "UsagePlan$throttle": "Map containing method level throttling information for API stage in a usage plan.
" } }, "Timestamp": { @@ -2156,7 +2156,7 @@ } }, "UsagePlan": { - "base": "Represents a usage plan than can specify who can assess associated API stages with specified request limits and quotas.
In a usage plan, you associate an API by specifying the API's Id and a stage name of the specified API. You add plan customers by adding API keys to the plan.
Represents a usage plan used to specify who can access associated API stages. Optionally, target request rate and quota limits can be set. In some cases clients can exceed the targets that you set. Don’t rely on usage plans to control costs. Consider using AWS Budgets to monitor costs and AWS WAF to manage API requests.
In a usage plan, you associate an API by specifying the API's Id and a stage name of the specified API. You add plan customers by adding API keys to the plan.
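A hedged aws-sdk-go sketch of creating a usage plan with the target rate, burst, and quota limits described above (the plan name and numbers are illustrative only):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func main() {
	svc := apigateway.New(session.Must(session.NewSession()))

	plan, err := svc.CreateUsagePlan(&apigateway.CreateUsagePlanInput{
		Name: aws.String("basic-plan"),
		Throttle: &apigateway.ThrottleSettings{
			RateLimit:  aws.Float64(100), // target steady-state requests per second
			BurstLimit: aws.Int64(200),   // target burst capacity
		},
		Quota: &apigateway.QuotaSettings{
			Limit:  aws.Int64(10000), // target maximum requests per period
			Period: aws.String("WEEK"),
			Offset: aws.Int64(1), // period starts on Monday
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(plan.Id))
}
```

API keys and API stages can then be attached to the returned plan ID to make the limits effective.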
Returns information about a specific domain.
", "GetIdentityResolutionJob": "Returns information about an Identity Resolution Job in a specific domain.
Identity Resolution Jobs are set up using the Amazon Connect admin console. For more information, see Use Identity Resolution to consolidate similar profiles.
", "GetIntegration": "Returns an integration for a domain.
", - "GetMatches": "This API is in preview release for Amazon Connect and subject to change.
Before calling this API, use CreateDomain or UpdateDomain to enable identity resolution: set Matching
to true.
GetMatches returns potentially matching profiles, based on the results of the latest run of a machine learning process.
The process of matching duplicate profiles. If Matching
= true
, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig
in the MatchingRequest
, you can download the results from S3.
Amazon Connect uses the following profile attributes to identify matches:
PhoneNumber
HomePhoneNumber
BusinessPhoneNumber
MobilePhoneNumber
EmailAddress
PersonalEmailAddress
BusinessEmailAddress
FullName
BusinessName
For example, two or more profiles—with spelling mistakes such as John Doe and Jhn Doe, or different casing email addresses such as JOHN_DOE@ANYCOMPANY.COM and johndoe@anycompany.com, or different phone number formats such as 555-010-0000 and +1-555-010-0000—can be detected as belonging to the same customer John Doe and merged into a unified profile.
", + "GetMatches": "Before calling this API, use CreateDomain or UpdateDomain to enable identity resolution: set Matching
to true.
GetMatches returns potentially matching profiles, based on the results of the latest run of a machine learning process.
The process of matching duplicate profiles. If Matching
= true
, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig
in the MatchingRequest
, you can download the results from S3.
Amazon Connect uses the following profile attributes to identify matches:
PhoneNumber
HomePhoneNumber
BusinessPhoneNumber
MobilePhoneNumber
EmailAddress
PersonalEmailAddress
BusinessEmailAddress
FullName
BusinessName
For example, two or more profiles—with spelling mistakes such as John Doe and Jhn Doe, or different casing email addresses such as JOHN_DOE@ANYCOMPANY.COM and johndoe@anycompany.com, or different phone number formats such as 555-010-0000 and +1-555-010-0000—can be detected as belonging to the same customer John Doe and merged into a unified profile.
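A minimal aws-sdk-go sketch of reviewing the latest Identity Resolution Job results with GetMatches; the domain name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/customerprofiles"
)

func main() {
	svc := customerprofiles.New(session.Must(session.NewSession()))

	out, err := svc.GetMatches(&customerprofiles.GetMatchesInput{
		DomainName: aws.String("my-domain"), // hypothetical domain
		MaxResults: aws.Int64(50),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range out.Matches {
		// Each match group carries an ID and the profile IDs it contains.
		fmt.Println(aws.StringValue(m.MatchId), aws.StringValueSlice(m.ProfileIds))
	}
}
```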
", "GetProfileObjectType": "Returns the object types for a specific domain.
", "GetProfileObjectTypeTemplate": "Returns the template information for a specific object type.
A template is a predefined ProfileObjectType, such as “Salesforce-Account” or “Salesforce-Contact.” When a user sends a ProfileObject, using the PutProfileObject API, with an ObjectTypeName that matches one of the TemplateIds, it uses the mappings from the template.
", "ListAccountIntegrations": "Lists all of the integrations associated to a specific URI in the AWS account.
", @@ -26,7 +26,7 @@ "ListProfileObjectTypes": "Lists all of the templates available within the service.
", "ListProfileObjects": "Returns a list of objects associated with a profile of a given ProfileObjectType.
", "ListTagsForResource": "Displays the tags associated with an Amazon Connect Customer Profiles resource. In Connect Customer Profiles, domains, profile object types, and integrations can be tagged.
", - "MergeProfiles": "This API is in preview release for Amazon Connect and subject to change.
Runs an AWS Lambda job that does the following:
All the profileKeys in the ProfileToBeMerged
will be moved to the main profile.
All the objects in the ProfileToBeMerged
will be moved to the main profile.
All the ProfileToBeMerged
will be deleted at the end.
All the profileKeys in the ProfileIdsToBeMerged
will be moved to the main profile.
Standard fields are merged as follows:
Fields are always \"union\"-ed if there are no conflicts in standard fields or attributeKeys.
When there are conflicting fields:
If no SourceProfileIds
entry is specified, the main Profile value is always taken.
If a SourceProfileIds
entry is specified, the specified profileId is always taken, even if it is a NULL value.
You can use MergeProfiles together with GetMatches, which returns potentially matching profiles, or use it with the results of another matching system. After profiles have been merged, they cannot be separated (unmerged).
", + "MergeProfiles": "Runs an AWS Lambda job that does the following:
All the profileKeys in the ProfileToBeMerged
will be moved to the main profile.
All the objects in the ProfileToBeMerged
will be moved to the main profile.
All the ProfileToBeMerged
will be deleted at the end.
All the profileKeys in the ProfileIdsToBeMerged
will be moved to the main profile.
Standard fields are merged as follows:
Fields are always \"union\"-ed if there are no conflicts in standard fields or attributeKeys.
When there are conflicting fields:
If no SourceProfileIds
entry is specified, the main Profile value is always taken.
If a SourceProfileIds
entry is specified, the specified profileId is always taken, even if it is a NULL value.
You can use MergeProfiles together with GetMatches, which returns potentially matching profiles, or use it with the results of another matching system. After profiles have been merged, they cannot be separated (unmerged).
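A companion sketch for MergeProfiles; the domain and profile IDs are placeholders and would typically come from a GetMatches result:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/customerprofiles"
)

func main() {
	svc := customerprofiles.New(session.Must(session.NewSession()))

	// Merge a duplicate into the main profile. Remember that merges
	// cannot be undone once they complete.
	_, err := svc.MergeProfiles(&customerprofiles.MergeProfilesInput{
		DomainName:           aws.String("my-domain"),
		MainProfileId:        aws.String("main-profile-id"),
		ProfileIdsToBeMerged: []*string{aws.String("duplicate-profile-id")},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```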
", "PutIntegration": "Adds an integration between the service and a third-party service, which includes Amazon AppFlow and Amazon Connect.
An integration can belong to only one domain.
", "PutProfileObject": "Adds additional objects to customer profiles of a given ObjectType.
When adding a specific profile object, like a Contact Trace Record (CTR), an inferred profile can get created if it is not mapped to an existing profile. The resulting profile will only have a phone number populated in the standard ProfileObject. Any additional CTRs with the same phone number will be mapped to the same inferred profile.
When a ProfileObject is created and if a ProfileObjectType already exists for the ProfileObject, it will provide data to a standard profile depending on the ProfileObjectType definition.
PutProfileObject needs an ObjectType, which can be created using PutProfileObjectType.
", "PutProfileObjectType": "Defines a ProfileObjectType.
", @@ -82,7 +82,7 @@ "ListProfileObjectTypesRequest$DomainName": "The unique name of the domain.
", "ListProfileObjectsRequest$DomainName": "The unique name of the domain.
", "MergeProfilesRequest$DomainName": "The unique name of the domain.
", - "ObjectFilter$KeyName": "A searchable identifier of a standard profile object. The predefined keys you can use to search for _asset include: _assetId, _assetName, _serialNumber. The predefined keys you can use to search for _case include: _caseId.
", + "ObjectFilter$KeyName": "A searchable identifier of a standard profile object. The predefined keys you can use to search for _asset include: _assetId, _assetName, _serialNumber. The predefined keys you can use to search for _case include: _caseId. The predefined keys you can use to search for _order include: _orderId.
", "PutIntegrationRequest$DomainName": "The unique name of the domain.
", "PutIntegrationResponse$DomainName": "The unique name of the domain.
", "PutProfileObjectRequest$DomainName": "The unique name of the domain.
", @@ -90,7 +90,7 @@ "PutProfileObjectTypeRequest$TemplateId": "A unique identifier for the object template.
", "PutProfileObjectTypeResponse$TemplateId": "A unique identifier for the object template.
", "SearchProfilesRequest$DomainName": "The unique name of the domain.
", - "SearchProfilesRequest$KeyName": "A searchable identifier of a customer profile. The predefined keys you can use to search include: _account, _profileId, _fullName, _phone, _email, _ctrContactId, _marketoLeadId, _salesforceAccountId, _salesforceContactId, _zendeskUserId, _zendeskExternalId, _serviceNowSystemId.
", + "SearchProfilesRequest$KeyName": "A searchable identifier of a customer profile. The predefined keys you can use to search include: _account, _profileId, _assetId, _caseId, _orderId, _fullName, _phone, _email, _ctrContactId, _marketoLeadId, _salesforceAccountId, _salesforceContactId, _salesforceAssetId, _zendeskUserId, _zendeskExternalId, _zendeskTicketId, _serviceNowSystemId, _serviceNowIncidentId, _segmentUserId, _shopifyCustomerId, _shopifyOrderId.
", "UpdateDomainRequest$DomainName": "The unique name of the domain.
", "UpdateDomainResponse$DomainName": "The unique name of the domain.
", "UpdateProfileRequest$DomainName": "The unique name of the domain.
" @@ -714,9 +714,9 @@ } }, "ObjectFilter": { - "base": "The filter applied to ListProfileObjects response to include profile objects with the specified index values. This filter is only supported for ObjectTypeName _asset and _case.
", + "base": "The filter applied to ListProfileObjects response to include profile objects with the specified index values. This filter is only supported for ObjectTypeName _asset, _case and _order.
", "refs": { - "ListProfileObjectsRequest$ObjectFilter": "Applies a filter to the response to include profile objects with the specified index values. This filter is only supported for ObjectTypeName _asset and _case.
" + "ListProfileObjectsRequest$ObjectFilter": "Applies a filter to the response to include profile objects with the specified index values. This filter is only supported for ObjectTypeName _asset, _case and _order.
" } }, "ObjectTypeField": { @@ -737,6 +737,15 @@ "KeyMap$value": null } }, + "ObjectTypeNames": { + "base": null, + "refs": { + "GetIntegrationResponse$ObjectTypeNames": "A map in which each key is an event type from an external application such as Segment or Shopify, and each value is an ObjectTypeName
(template) used to ingest the event. It supports the following event types: SegmentIdentify
, ShopifyCreateCustomers
, ShopifyUpdateCustomers
, ShopifyCreateDraftOrders
, ShopifyUpdateDraftOrders
, ShopifyCreateOrders
, and ShopifyUpdatedOrders
.
A map in which each key is an event type from an external application such as Segment or Shopify, and each value is an ObjectTypeName
(template) used to ingest the event. It supports the following event types: SegmentIdentify
, ShopifyCreateCustomers
, ShopifyUpdateCustomers
, ShopifyCreateDraftOrders
, ShopifyUpdateDraftOrders
, ShopifyCreateOrders
, and ShopifyUpdatedOrders
.
A map in which each key is an event type from an external application such as Segment or Shopify, and each value is an ObjectTypeName
(template) used to ingest the event. It supports the following event types: SegmentIdentify
, ShopifyCreateCustomers
, ShopifyUpdateCustomers
, ShopifyCreateDraftOrders
, ShopifyUpdateDraftOrders
, ShopifyCreateOrders
, and ShopifyUpdatedOrders
.
A map in which each key is an event type from an external application such as Segment or Shopify, and each value is an ObjectTypeName
(template) used to ingest the event. It supports the following event types: SegmentIdentify
, ShopifyCreateCustomers
, ShopifyUpdateCustomers
, ShopifyCreateDraftOrders
, ShopifyUpdateDraftOrders
, ShopifyCreateOrders
, and ShopifyUpdatedOrders
.
The types of keys that a ProfileObject can have. Each ProfileObject can have only 1 UNIQUE key but multiple PROFILE keys. PROFILE, ASSET or CASE means that this key can be used to tie an object to a PROFILE, ASSET or CASE respectively. UNIQUE means that it can be used to uniquely identify an object. If a key a is marked as SECONDARY, it will be used to search for profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a profile but is not persisted to be used for searching of the profile. A NEW_ONLY key is only used if the profile does not already exist before the object is ingested, otherwise it is only used for matching objects to profiles.
" + "ObjectTypeKey$StandardIdentifiers": "The types of keys that a ProfileObject can have. Each ProfileObject can have only 1 UNIQUE key but multiple PROFILE keys. PROFILE, ASSET, CASE, or ORDER means that this key can be used to tie an object to a PROFILE, ASSET, CASE, or ORDER respectively. UNIQUE means that it can be used to uniquely identify an object. If a key a is marked as SECONDARY, it will be used to search for profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a profile but is not persisted to be used for searching of the profile. A NEW_ONLY key is only used if the profile does not already exist before the object is ingested, otherwise it is only used for matching objects to profiles.
" } }, "TagArn": { @@ -1333,6 +1342,7 @@ "ListProfileObjectsItem$ProfileObjectUniqueKey": "The unique identifier of the ProfileObject generated by the service.
", "MatchItem$MatchId": "The unique identifiers for this group of profiles that match.
", "MatchingAttributes$member": null, + "ObjectTypeNames$key": null, "Profile$AccountNumber": "A unique account number that you have given to the customer.
", "Profile$BusinessName": "The name of the customer’s business.
", "Profile$FirstName": "The customer’s first name.
", @@ -1448,6 +1458,7 @@ "ListProfileObjectTypeItem$ObjectTypeName": "The name of the profile object type.
", "ListProfileObjectsItem$ObjectTypeName": "Specifies the kind of object being added to a profile, such as \"Salesforce-Account.\"
", "ListProfileObjectsRequest$ObjectTypeName": "The name of the profile object type.
", + "ObjectTypeNames$value": null, "PutIntegrationRequest$ObjectTypeName": "The name of the profile object type.
", "PutIntegrationResponse$ObjectTypeName": "The name of the profile object type.
", "PutProfileObjectRequest$ObjectTypeName": "The name of the profile object type.
", diff --git a/models/apis/datasync/2018-11-09/api-2.json b/models/apis/datasync/2018-11-09/api-2.json index 70e4e313ee1..605d49a0e0b 100644 --- a/models/apis/datasync/2018-11-09/api-2.json +++ b/models/apis/datasync/2018-11-09/api-2.json @@ -53,6 +53,19 @@ {"shape":"InternalException"} ] }, + "CreateLocationFsxLustre":{ + "name":"CreateLocationFsxLustre", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLocationFsxLustreRequest"}, + "output":{"shape":"CreateLocationFsxLustreResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "CreateLocationFsxWindows":{ "name":"CreateLocationFsxWindows", "http":{ @@ -209,6 +222,19 @@ {"shape":"InternalException"} ] }, + "DescribeLocationFsxLustre":{ + "name":"DescribeLocationFsxLustre", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationFsxLustreRequest"}, + "output":{"shape":"DescribeLocationFsxLustreResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "DescribeLocationFsxWindows":{ "name":"DescribeLocationFsxWindows", "http":{ @@ -605,6 +631,25 @@ "LocationArn":{"shape":"LocationArn"} } }, + "CreateLocationFsxLustreRequest":{ + "type":"structure", + "required":[ + "FsxFilesystemArn", + "SecurityGroupArns" + ], + "members":{ + "FsxFilesystemArn":{"shape":"FsxFilesystemArn"}, + "SecurityGroupArns":{"shape":"Ec2SecurityGroupArnList"}, + "Subdirectory":{"shape":"FsxLustreSubdirectory"}, + "Tags":{"shape":"InputTagList"} + } + }, + "CreateLocationFsxLustreResponse":{ + "type":"structure", + "members":{ + "LocationArn":{"shape":"LocationArn"} + } + }, "CreateLocationFsxWindowsRequest":{ "type":"structure", "required":[ @@ -846,6 +891,22 @@ "CreationTime":{"shape":"Time"} } }, + "DescribeLocationFsxLustreRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{"shape":"LocationArn"} + } + }, + "DescribeLocationFsxLustreResponse":{ + "type":"structure", + "members":{ + "LocationArn":{"shape":"LocationArn"}, + "LocationUri":{"shape":"LocationUri"}, + "SecurityGroupArns":{"shape":"Ec2SecurityGroupArnList"}, + "CreationTime":{"shape":"Time"} + } + }, "DescribeLocationFsxWindowsRequest":{ "type":"structure", "required":["LocationArn"], @@ -1111,6 +1172,11 @@ "max":128, "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):fsx:[a-z\\-0-9]*:[0-9]{12}:file-system/fs-.*$" }, + "FsxLustreSubdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$" + }, "FsxWindowsSubdirectory":{ "type":"string", "max":4096, @@ -1368,7 +1434,7 @@ "LocationUri":{ "type":"string", "max":4356, - "pattern":"^(efs|nfs|s3|smb|fsxw)://[a-zA-Z0-9.\\-]+$" + "pattern":"^(efs|nfs|s3|smb|fsxw|fsxl)://[a-zA-Z0-9.\\-]+$" }, "LogGroupArn":{ "type":"string", diff --git a/models/apis/datasync/2018-11-09/docs-2.json b/models/apis/datasync/2018-11-09/docs-2.json index 1b969a43700..e4731c38c59 100644 --- a/models/apis/datasync/2018-11-09/docs-2.json +++ b/models/apis/datasync/2018-11-09/docs-2.json @@ -5,6 +5,7 @@ "CancelTaskExecution": "Cancels execution of a task.
When you cancel a task execution, the transfer of some files is abruptly interrupted. The contents of files that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution on the same task and you allow the task execution to complete, file content on the destination is complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, DataSync successfully complete the transfer when you start the next task execution.
", "CreateAgent": "Activates an DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the Amazon Web Services Region that you want to activate the agent in. You activate the agent in the Amazon Web Services Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region.
You can activate the agent in a VPC (virtual private cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public internet.
You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run.
Agents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that ensures minimal interruption to your tasks.
", "CreateLocationEfs": "Creates an endpoint for an Amazon EFS file system.
", + "CreateLocationFsxLustre": "Creates an endpoint for an Amazon FSx for Lustre file system.
", "CreateLocationFsxWindows": "Creates an endpoint for an Amazon FSx for Windows File Server file system.
", "CreateLocationHdfs": "Creates an endpoint for a Hadoop Distributed File System (HDFS).
", "CreateLocationNfs": "Defines a file system on a Network File System (NFS) server that can be read from or written to.
", @@ -17,6 +18,7 @@ "DeleteTask": "Deletes a task.
", "DescribeAgent": "Returns metadata such as the name, the network interfaces, and the status (that is, whether the agent is running or not) for an agent. To specify which agent to describe, use the Amazon Resource Name (ARN) of the agent in your request.
", "DescribeLocationEfs": "Returns metadata, such as the path information about an Amazon EFS location.
", + "DescribeLocationFsxLustre": "Returns metadata, such as the path information about an Amazon FSx for Lustre location.
", "DescribeLocationFsxWindows": "Returns metadata, such as the path information about an Amazon FSx for Windows File Server location.
", "DescribeLocationHdfs": "Returns metadata, such as the authentication information about the Hadoop Distributed File System (HDFS) location.
", "DescribeLocationNfs": "Returns metadata, such as the path information, about an NFS location.
", @@ -138,6 +140,16 @@ "refs": { } }, + "CreateLocationFsxLustreRequest": { + "base": null, + "refs": { + } + }, + "CreateLocationFsxLustreResponse": { + "base": null, + "refs": { + } + }, "CreateLocationFsxWindowsRequest": { "base": null, "refs": { @@ -258,6 +270,16 @@ "refs": { } }, + "DescribeLocationFsxLustreRequest": { + "base": null, + "refs": { + } + }, + "DescribeLocationFsxLustreResponse": { + "base": null, + "refs": { + } + }, "DescribeLocationFsxWindowsRequest": { "base": null, "refs": { @@ -370,7 +392,9 @@ "Ec2SecurityGroupArnList": { "base": null, "refs": { - "CreateLocationFsxWindowsRequest$SecurityGroupArns": "The Amazon Resource Names (ARNs) of the security groups that are to use to configure the FSx for Windows File Server file system.
", + "CreateLocationFsxLustreRequest$SecurityGroupArns": "The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Lustre file system.
", + "CreateLocationFsxWindowsRequest$SecurityGroupArns": "The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Windows File Server file system.
", + "DescribeLocationFsxLustreResponse$SecurityGroupArns": "The Amazon Resource Names (ARNs) of the security groups that are configured for the FSx for Lustre file system.
", "DescribeLocationFsxWindowsResponse$SecurityGroupArns": "The Amazon Resource Names (ARNs) of the security groups that are configured for the FSx for Windows File Server file system.
", "Ec2Config$SecurityGroupArns": "The Amazon Resource Names (ARNs) of the security groups that are configured for the Amazon EC2 resource.
" } @@ -455,13 +479,20 @@ "FsxFilesystemArn": { "base": null, "refs": { + "CreateLocationFsxLustreRequest$FsxFilesystemArn": "The Amazon Resource Name (ARN) for the FSx for Lustre file system.
", "CreateLocationFsxWindowsRequest$FsxFilesystemArn": "The Amazon Resource Name (ARN) for the FSx for Windows File Server file system.
" } }, + "FsxLustreSubdirectory": { + "base": null, + "refs": { + "CreateLocationFsxLustreRequest$Subdirectory": "A subdirectory in the location's path. This subdirectory in the FSx for Lustre file system is used to read data from the FSx for Lustre source location or write data to the FSx for Lustre destination.
" + } + }, "FsxWindowsSubdirectory": { "base": null, "refs": { - "CreateLocationFsxWindowsRequest$Subdirectory": "A subdirectory in the location’s path. This subdirectory in the Amazon FSx for Windows File Server file system is used to read data from the Amazon FSx for Windows File Server source location or write data to the FSx for Windows File Server destination.
" + "CreateLocationFsxWindowsRequest$Subdirectory": "A subdirectory in the location's path. This subdirectory in the Amazon FSx for Windows File Server file system is used to read data from the Amazon FSx for Windows File Server source location or write data to the FSx for Windows File Server destination.
" } }, "Gid": { @@ -558,6 +589,7 @@ "refs": { "CreateAgentRequest$Tags": "The key-value pair that represents the tag that you want to associate with the agent. The value can be an empty string. This value helps you manage, filter, and search for your agents.
Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.
The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.
", + "CreateLocationFsxLustreRequest$Tags": "The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.
", "CreateLocationFsxWindowsRequest$Tags": "The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.
", "CreateLocationHdfsRequest$Tags": "The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.
", "CreateLocationNfsRequest$Tags": "The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.
", @@ -662,6 +694,7 @@ "base": null, "refs": { "CreateLocationEfsResponse$LocationArn": "The Amazon Resource Name (ARN) of the Amazon EFS file system location that is created.
", + "CreateLocationFsxLustreResponse$LocationArn": "The Amazon Resource Name (ARN) of the FSx for Lustre file system location that's created.
", "CreateLocationFsxWindowsResponse$LocationArn": "The Amazon Resource Name (ARN) of the FSx for Windows File Server file system location that is created.
", "CreateLocationHdfsResponse$LocationArn": "The ARN of the source HDFS cluster location that's created.
", "CreateLocationNfsResponse$LocationArn": "The Amazon Resource Name (ARN) of the source NFS file system location that is created.
", @@ -673,6 +706,8 @@ "DeleteLocationRequest$LocationArn": "The Amazon Resource Name (ARN) of the location to delete.
", "DescribeLocationEfsRequest$LocationArn": "The Amazon Resource Name (ARN) of the EFS location to describe.
", "DescribeLocationEfsResponse$LocationArn": "The Amazon Resource Name (ARN) of the EFS location that was described.
", + "DescribeLocationFsxLustreRequest$LocationArn": "The Amazon Resource Name (ARN) of the FSx for Lustre location to describe.
", + "DescribeLocationFsxLustreResponse$LocationArn": "The Amazon Resource Name (ARN) of the FSx for Lustre location that was described.
", "DescribeLocationFsxWindowsRequest$LocationArn": "The Amazon Resource Name (ARN) of the FSx for Windows File Server location to describe.
", "DescribeLocationFsxWindowsResponse$LocationArn": "The Amazon Resource Name (ARN) of the FSx for Windows File Server location that was described.
", "DescribeLocationHdfsRequest$LocationArn": "The Amazon Resource Name (ARN) of the HDFS cluster location to describe.
", @@ -728,13 +763,14 @@ "base": null, "refs": { "DescribeLocationEfsResponse$LocationUri": "The URL of the EFS location that was described.
", + "DescribeLocationFsxLustreResponse$LocationUri": "The URI of the FSx for Lustre location that was described.
", "DescribeLocationFsxWindowsResponse$LocationUri": "The URL of the FSx for Windows File Server location that was described.
", "DescribeLocationHdfsResponse$LocationUri": "The URI of the HDFS cluster location.
", "DescribeLocationNfsResponse$LocationUri": "The URL of the source NFS location that was described.
", "DescribeLocationObjectStorageResponse$LocationUri": "The URL of the source self-managed object storage server location that was described.
", "DescribeLocationS3Response$LocationUri": "The URL of the Amazon S3 location that was described.
", "DescribeLocationSmbResponse$LocationUri": "The URL of the source SMB location that was described.
", - "LocationListEntry$LocationUri": "Represents a list of URLs of a location. LocationUri
returns an array that contains a list of locations when the ListLocations operation is called.
Format: TYPE://GLOBAL_ID/SUBDIR
.
TYPE designates the type of location. Valid values: NFS | EFS | S3.
GLOBAL_ID is the globally unique identifier of the resource that backs the location. An example for EFS is us-east-2.fs-abcd1234
. An example for Amazon S3 is the bucket name, such as myBucket
. An example for NFS is a valid IPv4 address or a host name compliant with Domain Name Service (DNS).
SUBDIR is a valid file system path, delimited by forward slashes as is the *nix convention. For NFS and Amazon EFS, it's the export path to mount the location. For Amazon S3, it's the prefix path that you mount to and treat as the root of the location.
" + "LocationListEntry$LocationUri": "Represents a list of URIs of a location. LocationUri
returns an array that contains a list of locations when the ListLocations operation is called.
Format: TYPE://GLOBAL_ID/SUBDIR
.
TYPE designates the type of location. Valid values: NFS | EFS | S3.
GLOBAL_ID is the globally unique identifier of the resource that backs the location. An example for EFS is us-east-2.fs-abcd1234
. An example for Amazon S3 is the bucket name, such as myBucket
. An example for NFS is a valid IPv4 address or a host name compliant with Domain Name Service (DNS).
SUBDIR is a valid file system path, delimited by forward slashes as is the *nix convention. For NFS and Amazon EFS, it's the export path to mount the location. For Amazon S3, it's the prefix path that you mount to and treat as the root of the location.
" } }, "LogGroupArn": { @@ -1211,6 +1247,7 @@ "DescribeAgentResponse$LastConnectionTime": "The time that the agent last connected to DataSync.
", "DescribeAgentResponse$CreationTime": "The time that the agent was activated (that is, created in your account).
", "DescribeLocationEfsResponse$CreationTime": "The time that the EFS location was created.
", + "DescribeLocationFsxLustreResponse$CreationTime": "The time that the FSx for Lustre location was created.
", "DescribeLocationFsxWindowsResponse$CreationTime": "The time that the FSx for Windows File Server location was created.
", "DescribeLocationHdfsResponse$CreationTime": "The time that the HDFS location was created.
", "DescribeLocationNfsResponse$CreationTime": "The time that the NFS location was created.
", diff --git a/models/apis/devops-guru/2020-12-01/api-2.json b/models/apis/devops-guru/2020-12-01/api-2.json index 628ffd05b14..8052ff940a3 100644 --- a/models/apis/devops-guru/2020-12-01/api-2.json +++ b/models/apis/devops-guru/2020-12-01/api-2.json @@ -896,7 +896,8 @@ "CloudFormation":{"shape":"CloudFormationHealths"}, "Service":{"shape":"ServiceHealths"}, "Account":{"shape":"AccountHealths"}, - "NextToken":{"shape":"UuidNextToken"} + "NextToken":{"shape":"UuidNextToken"}, + "Tags":{"shape":"TagHealths"} } }, "DescribeResourceCollectionHealthRequest":{ @@ -1425,7 +1426,8 @@ "enum":[ "AWS_CLOUD_FORMATION", "AWS_SERVICE", - "AWS_ACCOUNT" + "AWS_ACCOUNT", + "AWS_TAGS" ] }, "OrganizationalUnitId":{ diff --git a/models/apis/devops-guru/2020-12-01/docs-2.json b/models/apis/devops-guru/2020-12-01/docs-2.json index 06742e53dd7..6c354bb6f7a 100644 --- a/models/apis/devops-guru/2020-12-01/docs-2.json +++ b/models/apis/devops-guru/2020-12-01/docs-2.json @@ -1705,6 +1705,7 @@ "TagHealths": { "base": null, "refs": { + "DescribeOrganizationResourceCollectionHealthResponse$Tags": "Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support tagging, so you can assign the same tag to resources from different services to indicate that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB table resource that you assign to an Lambda function. For more information about using tags, see the Tagging best practices whitepaper.
Each Amazon Web Services tag has two parts.
A tag key (for example, CostCenter
, Environment
, Project
, or Secret
). Tag keys are case-sensitive.
An optional field known as a tag value (for example, 111122223333
, Production
, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case-sensitive.
Together these are known as key-value pairs.
The string used for a key in a tag that you use to define your resource coverage must begin with the prefix Devops-guru-
. The tag key might be Devops-guru-deployment-application
or Devops-guru-rds-application
. While keys are case-sensitive, the case of key characters doesn't matter to DevOps Guru. For example, DevOps Guru works with a key named devops-guru-rds
and a key named DevOps-Guru-RDS
. Possible key/value pairs in your application might be Devops-Guru-production-application/RDS
or Devops-Guru-production-application/containers
.
The Amazon Web Services tags that are used by resources in the resource collection.
Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support tagging, so you can assign the same tag to resources from different services to indicate that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB table resource that you assign to a Lambda function. For more information about using tags, see the Tagging best practices whitepaper.
Each Amazon Web Services tag has two parts.
A tag key (for example, CostCenter
, Environment
, Project
, or Secret
). Tag keys are case-sensitive.
An optional field known as a tag value (for example, 111122223333
, Production
, or a team name). Omitting the tag value is the same as using an empty string. Like tag keys, tag values are case-sensitive.
Together these are known as key-value pairs.
The string used for a key in a tag that you use to define your resource coverage must begin with the prefix Devops-guru-
. The tag key might be Devops-guru-deployment-application
or Devops-guru-rds-application
. While keys are case-sensitive, the case of key characters doesn't matter to DevOps Guru. For example, DevOps Guru works with a key named devops-guru-rds
and a key named DevOps-Guru-RDS
. Possible key/value pairs in your application might be Devops-Guru-production-application/RDS
or Devops-Guru-production-application/containers
.
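A hedged sketch of requesting health rolled up by the Devops-guru-* tag coverage described above, using the new AWS_TAGS collection type; the enum string comes from the api-2.json change earlier in this diff, and the field names follow the generated aws-sdk-go shapes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/devopsguru"
)

func main() {
	svc := devopsguru.New(session.Must(session.NewSession()))

	// Tag-based health for the organization; the response now carries a
	// Tags list per the model update above.
	out, err := svc.DescribeOrganizationResourceCollectionHealth(
		&devopsguru.DescribeOrganizationResourceCollectionHealthInput{
			OrganizationResourceCollectionType: aws.String("AWS_TAGS"),
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out.Tags))
}
```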
The text of the error message.
" } }, + "ExportFileFormat": { + "base": "Data View Export File Format", + "refs": { + "DataViewDestinationTypeParams$s3DestinationExportFileFormat": null + } + }, "FormatParams": { "base": "Format Parameters of a Changeset", "refs": { @@ -529,6 +535,12 @@ "ListDatasetsRequest$maxResults": "The maximum number of results per page.
" } }, + "S3DestinationFormatOptions": { + "base": null, + "refs": { + "DataViewDestinationTypeParams$s3DestinationExportFileFormatOptions": null + } + }, "SchemaDefinition": { "base": "Definition for a schema on a tabular Dataset.
", "refs": { @@ -572,6 +584,7 @@ "base": null, "refs": { "FormatParams$key": null, + "S3DestinationFormatOptions$key": null, "SourceParams$key": null } }, @@ -579,6 +592,7 @@ "base": null, "refs": { "FormatParams$value": null, + "S3DestinationFormatOptions$value": null, "SourceParams$value": null } }, @@ -606,6 +620,7 @@ "refs": { "ChangesetSummary$createTime": "The timestamp at which the Changeset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", "ChangesetSummary$activeUntilTimestamp": "Time until which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", + "ChangesetSummary$activeFromTimestamp": null, "CreateDataViewRequest$asOfTimestamp": "Beginning time to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", "DataViewSummary$asOfTimestamp": "Time range to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", "DataViewSummary$createTime": "The timestamp at which the Dataview was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", @@ -614,6 +629,7 @@ "Dataset$lastModifiedTime": "The last time that the Dataset was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", "GetChangesetResponse$createTime": "The timestamp at which the Changeset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", "GetChangesetResponse$activeUntilTimestamp": "Time until which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", + "GetChangesetResponse$activeFromTimestamp": null, "GetDataViewResponse$asOfTimestamp": "Time range to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", "GetDataViewResponse$lastModifiedTime": "The last time that a Dataview was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", "GetDataViewResponse$createTime": "The timestamp at which the Dataview was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
", diff --git a/models/apis/forecast/2018-06-26/api-2.json b/models/apis/forecast/2018-06-26/api-2.json index 7155f2a1ae7..38185c2f2ee 100644 --- a/models/apis/forecast/2018-06-26/api-2.json +++ b/models/apis/forecast/2018-06-26/api-2.json @@ -1147,6 +1147,7 @@ "ForecastHorizon":{"shape":"Integer"}, "ForecastTypes":{"shape":"ForecastTypes"}, "ForecastFrequency":{"shape":"Frequency"}, + "ForecastDimensions":{"shape":"ForecastDimensions"}, "DatasetImportJobArns":{"shape":"ArnList"}, "DataConfig":{"shape":"DataConfig"}, "EncryptionConfig":{"shape":"EncryptionConfig"}, diff --git a/models/apis/forecast/2018-06-26/docs-2.json b/models/apis/forecast/2018-06-26/docs-2.json index 7fd37ffdc90..706f9e88777 100644 --- a/models/apis/forecast/2018-06-26/docs-2.json +++ b/models/apis/forecast/2018-06-26/docs-2.json @@ -2,24 +2,24 @@ "version": "2.0", "service": "Provides APIs for creating and managing Amazon Forecast resources.
", "operations": { - "CreateAutoPredictor": "Creates an Amazon Forecast predictor.
Amazon Forecast creates predictors with AutoPredictor, which involves applying the optimal combination of algorithms to each time series in your datasets. You can use CreateAutoPredictor to create new predictors or upgrade/retrain existing predictors.
Creating new predictors
The following parameters are required when creating a new predictor:
PredictorName
- A unique name for the predictor.
DatasetGroupArn
- The ARN of the dataset group used to train the predictor.
ForecastFrequency
- The granularity of your forecasts (hourly, daily, weekly, etc).
ForecastHorizon
- The number of time steps being forecasted.
When creating a new predictor, do not specify a value for ReferencePredictorArn
.
Upgrading and retraining predictors
The following parameters are required when retraining or upgrading a predictor:
PredictorName
- A unique name for the predictor.
ReferencePredictorArn
- The ARN of the predictor to retrain or upgrade.
When upgrading or retraining a predictor, only specify values for the ReferencePredictorArn
and PredictorName
.
Creates an Amazon Forecast predictor.
Amazon Forecast creates predictors with AutoPredictor, which involves applying the optimal combination of algorithms to each time series in your datasets. You can use CreateAutoPredictor to create new predictors or upgrade/retrain existing predictors.
Creating new predictors
The following parameters are required when creating a new predictor:
PredictorName
- A unique name for the predictor.
DatasetGroupArn
- The ARN of the dataset group used to train the predictor.
ForecastFrequency
- The granularity of your forecasts (hourly, daily, weekly, etc).
ForecastHorizon
- The number of time steps being forecasted.
When creating a new predictor, do not specify a value for ReferencePredictorArn
.
Upgrading and retraining predictors
The following parameters are required when retraining or upgrading a predictor:
PredictorName
- A unique name for the predictor.
ReferencePredictorArn
- The ARN of the predictor to retrain or upgrade.
When upgrading or retraining a predictor, only specify values for the ReferencePredictorArn
and PredictorName
.
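A sketch of creating a new predictor with the required parameters listed above, including the ForecastDimensions member added to CreateAutoPredictorRequest in this release; the ARN and dimension name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/forecastservice"
)

func main() {
	svc := forecastservice.New(session.Must(session.NewSession()))

	out, err := svc.CreateAutoPredictor(&forecastservice.CreateAutoPredictorInput{
		PredictorName:     aws.String("demand-predictor"),
		ForecastHorizon:   aws.Int64(14),   // 14 time steps
		ForecastFrequency: aws.String("D"), // daily forecasts
		// New in this release: dimensions can be passed directly here.
		ForecastDimensions: []*string{aws.String("store_id")},
		DataConfig: &forecastservice.DataConfig{
			DatasetGroupArn: aws.String("arn:aws:forecast:us-east-1:111122223333:dataset-group/demand"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.PredictorArn))
}
```

Note that ReferencePredictorArn is deliberately omitted, per the create-versus-retrain rules above.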
Creates an Amazon Forecast dataset. The information about the dataset that you provide helps Forecast understand how to consume the data for model training. This includes the following:
DataFrequency
- How frequently your historical time-series data is collected.
Domain
and DatasetType
- Each dataset has an associated dataset domain and a type within the domain. Amazon Forecast provides a list of predefined domains and types within each domain. For each unique dataset domain and type within the domain, Amazon Forecast requires your data to include a minimum set of predefined fields.
Schema
- A schema specifies the fields in the dataset, including the field name and data type.
After creating a dataset, you import your training data into it and add the dataset to a dataset group. You use the dataset group to create a predictor. For more information, see howitworks-datasets-groups.
To get a list of all your datasets, use the ListDatasets operation.
For example Forecast datasets, see the Amazon Forecast Sample GitHub repository.
The Status
of a dataset must be ACTIVE
before you can import training data. Use the DescribeDataset operation to get the status.
Creates a dataset group, which holds a collection of related datasets. You can add datasets to the dataset group when you create the dataset group, or later by using the UpdateDatasetGroup operation.
After creating a dataset group and adding datasets, you use the dataset group when you create a predictor. For more information, see howitworks-datasets-groups.
To get a list of all your dataset groups, use the ListDatasetGroups operation.
The Status
of a dataset group must be ACTIVE
before you can use the dataset group to create a predictor. To get the status, use the DescribeDatasetGroup operation.
Imports your training data to an Amazon Forecast dataset. You provide the location of your training data in an Amazon Simple Storage Service (Amazon S3) bucket and the Amazon Resource Name (ARN) of the dataset that you want to import the data to.
You must specify a DataSource object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data, as Amazon Forecast makes a copy of your data and processes it in an internal AWS system. For more information, see aws-forecast-iam-roles.
The training data must be in CSV format. The delimiter must be a comma (,).
You can specify the path to a specific CSV file, the S3 bucket, or to a folder in the S3 bucket. For the latter two cases, Amazon Forecast imports all files up to the limit of 10,000 files.
Because dataset imports are not aggregated, your most recent dataset import is the one that is used when training a predictor or generating a forecast. Make sure that your most recent dataset import contains all of the data you want to model off of, and not just the new data collected since the previous import.
To get a list of all your dataset import jobs, filtered by specified criteria, use the ListDatasetImportJobs operation.
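A minimal import-job sketch under the constraints above (CSV data in S3 plus an IAM role Forecast can assume); the bucket, role, and ARNs are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/forecastservice"
)

func main() {
	svc := forecastservice.New(session.Must(session.NewSession()))

	out, err := svc.CreateDatasetImportJob(&forecastservice.CreateDatasetImportJobInput{
		DatasetImportJobName: aws.String("demand-import"),
		DatasetArn:           aws.String("arn:aws:forecast:us-east-1:111122223333:dataset/demand"),
		DataSource: &forecastservice.DataSource{
			S3Config: &forecastservice.S3Config{
				Path:    aws.String("s3://my-bucket/train.csv"),
				RoleArn: aws.String("arn:aws:iam::111122223333:role/ForecastS3Access"),
			},
		},
		TimestampFormat: aws.String("yyyy-MM-dd"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DatasetImportJobArn))
}
```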
", - "CreateExplainability": "Explainability is only available for Forecasts and Predictors generated from an AutoPredictor (CreateAutoPredictor)
Creates an Amazon Forecast Explainability.
Explainability helps you better understand how the attributes in your datasets impact forecast. Amazon Forecast uses a metric called Impact scores to quantify the relative impact of each attribute and determine whether they increase or decrease forecast values.
To enable Forecast Explainability, your predictor must include at least one of the following: related time series, item metadata, or additional datasets like Holidays and the Weather Index.
CreateExplainability accepts either a Predictor ARN or Forecast ARN. To receive aggregated Impact scores for all time series and time points in your datasets, provide a Predictor ARN. To receive Impact scores for specific time series and time points, provide a Forecast ARN.
CreateExplainability with a Predictor ARN
You can only have one Explainability resource per predictor. If you already enabled ExplainPredictor
in CreateAutoPredictor, that predictor already has an Explainability resource.
The following parameters are required when providing a Predictor ARN:
ExplainabilityName
- A unique name for the Explainability.
ResourceArn
- The Arn of the predictor.
TimePointGranularity
- Must be set to “ALL”.
TimeSeriesGranularity
- Must be set to “ALL”.
Do not specify a value for the following parameters:
DataSource
- Only valid when TimeSeriesGranularity is “SPECIFIC”.
Schema
- Only valid when TimeSeriesGranularity is “SPECIFIC”.
StartDateTime
- Only valid when TimePointGranularity is “SPECIFIC”.
EndDateTime
- Only valid when TimePointGranularity is “SPECIFIC”.
CreateExplainability with a Forecast ARN
You can specify a maximum of 50 time series and 1500 time points.
The following parameters are required when providing a Predictor ARN:
ExplainabilityName
- A unique name for the Explainability.
ResourceArn
- The Arn of the forecast.
TimePointGranularity
- Either “ALL” or “SPECIFIC”.
TimeSeriesGranularity
- Either “ALL” or “SPECIFIC”.
If you set TimeSeriesGranularity to “SPECIFIC”, you must also provide the following:
DataSource
- The S3 location of the CSV file specifying your time series.
Schema
- The Schema defines the attributes and attribute types listed in the Data Source.
If you set TimePointGranularity to “SPECIFIC”, you must also provide the following:
StartDateTime
- The first timestamp in the range of time points.
EndDateTime
- The last timestamp in the range of time points.
Explainability is only available for Forecasts and Predictors generated from an AutoPredictor (CreateAutoPredictor)
Creates an Amazon Forecast Explainability.
Explainability helps you better understand how the attributes in your datasets impact forecast. Amazon Forecast uses a metric called Impact scores to quantify the relative impact of each attribute and determine whether they increase or decrease forecast values.
To enable Forecast Explainability, your predictor must include at least one of the following: related time series, item metadata, or additional datasets like Holidays and the Weather Index.
CreateExplainability accepts either a Predictor ARN or Forecast ARN. To receive aggregated Impact scores for all time series and time points in your datasets, provide a Predictor ARN. To receive Impact scores for specific time series and time points, provide a Forecast ARN.
CreateExplainability with a Predictor ARN
You can only have one Explainability resource per predictor. If you already enabled ExplainPredictor
in CreateAutoPredictor, that predictor already has an Explainability resource.
The following parameters are required when providing a Predictor ARN:
ExplainabilityName
- A unique name for the Explainability.
ResourceArn
- The Arn of the predictor.
TimePointGranularity
- Must be set to “ALL”.
TimeSeriesGranularity
- Must be set to “ALL”.
Do not specify a value for the following parameters:
DataSource
- Only valid when TimeSeriesGranularity is “SPECIFIC”.
Schema
- Only valid when TimeSeriesGranularity is “SPECIFIC”.
StartDateTime
- Only valid when TimePointGranularity is “SPECIFIC”.
EndDateTime
- Only valid when TimePointGranularity is “SPECIFIC”.
CreateExplainability with a Forecast ARN
You can specify a maximum of 50 time series and 500 time points.
The following parameters are required when providing a Forecast ARN:
ExplainabilityName
- A unique name for the Explainability.
ResourceArn
- The ARN of the forecast.
TimePointGranularity
- Either “ALL” or “SPECIFIC”.
TimeSeriesGranularity
- Either “ALL” or “SPECIFIC”.
If you set TimeSeriesGranularity to “SPECIFIC”, you must also provide the following:
DataSource
- The S3 location of the CSV file specifying your time series.
Schema
- The Schema defines the attributes and attribute types listed in the Data Source.
If you set TimePointGranularity to “SPECIFIC”, you must also provide the following:
StartDateTime
- The first timestamp in the range of time points.
EndDateTime
- The last timestamp in the range of time points.
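As a rough illustration only, here is a minimal Go sketch of this call using the forecastservice client from this SDK, passing a Forecast ARN with both granularities set to SPECIFIC. All ARNs, the S3 path, and the schema attributes are hypothetical placeholders; check the field names against the generated service model.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/forecastservice"
)

func main() {
	svc := forecastservice.New(session.Must(session.NewSession()))

	out, err := svc.CreateExplainability(&forecastservice.CreateExplainabilityInput{
		ExplainabilityName: aws.String("demand-explainability"), // hypothetical name
		// A Forecast ARN, so Impact scores are computed for specific
		// time series and time points (hypothetical ARN).
		ResourceArn: aws.String("arn:aws:forecast:us-west-2:123456789012:forecast/demand"),
		ExplainabilityConfig: &forecastservice.ExplainabilityConfig{
			TimeSeriesGranularity: aws.String("SPECIFIC"),
			TimePointGranularity:  aws.String("SPECIFIC"),
		},
		// Required because TimeSeriesGranularity is SPECIFIC: the CSV of
		// time series, and the schema describing its columns.
		DataSource: &forecastservice.DataSource{
			S3Config: &forecastservice.S3Config{
				Path:    aws.String("s3://my-bucket/time-series.csv"), // hypothetical
				RoleArn: aws.String("arn:aws:iam::123456789012:role/ForecastS3Role"),
			},
		},
		Schema: &forecastservice.Schema{
			Attributes: []*forecastservice.SchemaAttribute{
				{AttributeName: aws.String("item_id"), AttributeType: aws.String("string")},
				{AttributeName: aws.String("timestamp"), AttributeType: aws.String("timestamp")},
			},
		},
		// Required because TimePointGranularity is SPECIFIC.
		StartDateTime: aws.String("2015-01-01T00:00:00"),
		EndDateTime:   aws.String("2015-01-10T00:00:00"),
	})
	if err != nil {
		fmt.Println("CreateExplainability failed:", err)
		return
	}
	fmt.Println("Explainability ARN:", aws.StringValue(out.ExplainabilityArn))
}
```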
Exports an Explainability resource created by the CreateExplainability operation. The exported files are written to an Amazon Simple Storage Service (Amazon S3) bucket.
You must specify a DataDestination object that includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.
The Status
of the export job must be ACTIVE
before you can access the export in your Amazon S3 bucket. To get the status, use the DescribeExplainabilityExport operation.
Creates a forecast for each item in the TARGET_TIME_SERIES
dataset that was used to train the predictor. This is known as inference. To retrieve the forecast for a single item at low latency, use the QueryForecast operation. To export the complete forecast into your Amazon Simple Storage Service (Amazon S3) bucket, use the CreateForecastExportJob operation.
The range of the forecast is determined by the ForecastHorizon
value, which you specify in the CreatePredictor request. When you query a forecast, you can request a specific date range within the forecast.
To get a list of all your forecasts, use the ListForecasts operation.
The forecasts generated by Amazon Forecast are in the same time zone as the dataset that was used to create the predictor.
For more information, see howitworks-forecast.
The Status
of the forecast must be ACTIVE
before you can query or export the forecast. Use the DescribeForecast operation to get the status.
Exports a forecast created by the CreateForecast operation to your Amazon Simple Storage Service (Amazon S3) bucket. The forecast file name will match the following conventions:
<ForecastExportJobName>_<ExportTimestamp>_<PartNumber>
where the <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).
You must specify a DataDestination object that includes an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.
For more information, see howitworks-forecast.
To get a list of all your forecast export jobs, use the ListForecastExportJobs operation.
The Status
of the forecast export job must be ACTIVE
before you can access the forecast in your Amazon S3 bucket. To get the status, use the DescribeForecastExportJob operation.
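A hedged sketch of the export call with this SDK's forecastservice client; the job name, ARNs, and S3 path are hypothetical placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/forecastservice"
)

func main() {
	svc := forecastservice.New(session.Must(session.NewSession()))

	// The export job writes CSV part files named
	// <ForecastExportJobName>_<ExportTimestamp>_<PartNumber> to the bucket.
	out, err := svc.CreateForecastExportJob(&forecastservice.CreateForecastExportJobInput{
		ForecastExportJobName: aws.String("demand-export"), // hypothetical
		ForecastArn:           aws.String("arn:aws:forecast:us-west-2:123456789012:forecast/demand"),
		Destination: &forecastservice.DataDestination{
			S3Config: &forecastservice.S3Config{
				Path:    aws.String("s3://my-bucket/exports/"), // hypothetical
				RoleArn: aws.String("arn:aws:iam::123456789012:role/ForecastS3Role"),
			},
		},
	})
	if err != nil {
		fmt.Println("CreateForecastExportJob failed:", err)
		return
	}
	// Poll DescribeForecastExportJob until Status is ACTIVE before reading
	// the files from S3.
	fmt.Println("Export job ARN:", aws.StringValue(out.ForecastExportJobArn))
}
```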
This operation creates a legacy predictor that does not include all the predictor functionalities provided by Amazon Forecast. To create a predictor that is compatible with all aspects of Forecast, use CreateAutoPredictor.
Creates an Amazon Forecast predictor.
In the request, provide a dataset group and either specify an algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If you specify an algorithm, you can also override algorithm-specific hyperparameters.
Amazon Forecast uses the algorithm to train a predictor using the latest version of the datasets in the specified dataset group. You can then generate a forecast using the CreateForecast operation.
To see the evaluation metrics, use the GetAccuracyMetrics operation.
You can specify a featurization configuration to fill and aggregate the data fields in the TARGET_TIME_SERIES
dataset to improve model training. For more information, see FeaturizationConfig.
For RELATED_TIME_SERIES datasets, CreatePredictor
verifies that the DataFrequency
specified when the dataset was created matches the ForecastFrequency
. TARGET_TIME_SERIES datasets don't have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see howitworks-datasets-groups.
By default, predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90) quantiles. You can choose custom forecast types to train and evaluate your predictor by setting the ForecastTypes
.
AutoML
If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the objective function
, set PerformAutoML
to true
. The objective function
is defined as the mean of the weighted losses over the forecast types. By default, these are the p10, p50, and p90 quantile losses. For more information, see EvaluationResult.
When AutoML is enabled, the following properties are disallowed:
AlgorithmArn
HPOConfig
PerformHPO
TrainingParameters
To get a list of all of your predictors, use the ListPredictors operation.
Before you can use the predictor to create a forecast, the Status
of the predictor must be ACTIVE
, signifying that training has completed. To get the status, use the DescribePredictor operation.
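A minimal sketch of a legacy CreatePredictor request with AutoML enabled, assuming the forecastservice client from this SDK; the predictor name and dataset group ARN are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/forecastservice"
)

func main() {
	svc := forecastservice.New(session.Must(session.NewSession()))

	out, err := svc.CreatePredictor(&forecastservice.CreatePredictorInput{
		PredictorName:   aws.String("demand-predictor"), // hypothetical
		ForecastHorizon: aws.Int64(14),
		// AutoML: Forecast evaluates its algorithms and picks the one that
		// minimizes the objective function, so AlgorithmArn, PerformHPO,
		// HPOConfig, and TrainingParameters must be left unset.
		PerformAutoML: aws.Bool(true),
		// Custom quantiles instead of the default p10/p50/p90.
		ForecastTypes: aws.StringSlice([]string{"0.5", "0.9"}),
		InputDataConfig: &forecastservice.InputDataConfig{
			DatasetGroupArn: aws.String("arn:aws:forecast:us-west-2:123456789012:dataset-group/demand"),
		},
		FeaturizationConfig: &forecastservice.FeaturizationConfig{
			ForecastFrequency: aws.String("D"),
		},
	})
	if err != nil {
		fmt.Println("CreatePredictor failed:", err)
		return
	}
	fmt.Println("Predictor ARN:", aws.StringValue(out.PredictorArn))
}
```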
Exports backtest forecasts and accuracy metrics generated by the CreatePredictor operation. Two folders containing CSV files are exported to your specified S3 bucket.
The export file names will match the following conventions:
<ExportJobName>_<ExportTimestamp>_<PartNumber>.csv
The <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).
You must specify a DataDestination object that includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.
The Status
of the export job must be ACTIVE
before you can access the export in your Amazon S3 bucket. To get the status, use the DescribePredictorBacktestExportJob operation.
This operation creates a legacy predictor that does not include all the predictor functionalities provided by Amazon Forecast. To create a predictor that is compatible with all aspects of Forecast, use CreateAutoPredictor.
Creates an Amazon Forecast predictor.
In the request, provide a dataset group and either specify an algorithm or let Amazon Forecast choose an algorithm for you using AutoML. If you specify an algorithm, you can also override algorithm-specific hyperparameters.
Amazon Forecast uses the algorithm to train a predictor using the latest version of the datasets in the specified dataset group. You can then generate a forecast using the CreateForecast operation.
To see the evaluation metrics, use the GetAccuracyMetrics operation.
You can specify a featurization configuration to fill and aggregate the data fields in the TARGET_TIME_SERIES
dataset to improve model training. For more information, see FeaturizationConfig.
For RELATED_TIME_SERIES datasets, CreatePredictor
verifies that the DataFrequency
specified when the dataset was created matches the ForecastFrequency
. TARGET_TIME_SERIES datasets don't have this restriction. Amazon Forecast also verifies the delimiter and timestamp format. For more information, see howitworks-datasets-groups.
By default, predictors are trained and evaluated at the 0.1 (P10), 0.5 (P50), and 0.9 (P90) quantiles. You can choose custom forecast types to train and evaluate your predictor by setting the ForecastTypes
.
AutoML
If you want Amazon Forecast to evaluate each algorithm and choose the one that minimizes the objective function
, set PerformAutoML
to true
. The objective function
is defined as the mean of the weighted losses over the forecast types. By default, these are the p10, p50, and p90 quantile losses. For more information, see EvaluationResult.
When AutoML is enabled, the following properties are disallowed:
AlgorithmArn
HPOConfig
PerformHPO
TrainingParameters
To get a list of all of your predictors, use the ListPredictors operation.
Before you can use the predictor to create a forecast, the Status
of the predictor must be ACTIVE
, signifying that training has completed. To get the status, use the DescribePredictor operation.
Exports backtest forecasts and accuracy metrics generated by the CreateAutoPredictor or CreatePredictor operations. Two folders containing CSV files are exported to your specified S3 bucket.
The export file names will match the following conventions:
<ExportJobName>_<ExportTimestamp>_<PartNumber>.csv
The <ExportTimestamp> component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ).
You must specify a DataDestination object that includes an Amazon S3 bucket and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket. For more information, see aws-forecast-iam-roles.
The Status
of the export job must be ACTIVE
before you can access the export in your Amazon S3 bucket. To get the status, use the DescribePredictorBacktestExportJob operation.
Deletes an Amazon Forecast dataset that was created using the CreateDataset operation. You can only delete datasets that have a status of ACTIVE
or CREATE_FAILED
. To get the status use the DescribeDataset operation.
Forecast does not automatically update any dataset groups that contain the deleted dataset. To update the dataset group, use the UpdateDatasetGroup operation, omitting the deleted dataset's ARN.
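A short sketch of that follow-up call, assuming the forecastservice client; both ARNs are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/forecastservice"
)

func main() {
	svc := forecastservice.New(session.Must(session.NewSession()))

	// Replace the dataset group's dataset list with the datasets that
	// remain, omitting the ARN of the dataset that was just deleted.
	_, err := svc.UpdateDatasetGroup(&forecastservice.UpdateDatasetGroupInput{
		DatasetGroupArn: aws.String("arn:aws:forecast:us-west-2:123456789012:dataset-group/demand"),
		DatasetArns: aws.StringSlice([]string{
			"arn:aws:forecast:us-west-2:123456789012:dataset/demand-target",
		}),
	})
	if err != nil {
		fmt.Println("UpdateDatasetGroup failed:", err)
		return
	}
	fmt.Println("dataset group updated")
}
```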
Deletes a dataset group created using the CreateDatasetGroup operation. You can only delete dataset groups that have a status of ACTIVE
, CREATE_FAILED
, or UPDATE_FAILED
. To get the status, use the DescribeDatasetGroup operation.
This operation deletes only the dataset group, not the datasets in the group.
", "DeleteDatasetImportJob": "Deletes a dataset import job created using the CreateDatasetImportJob operation. You can delete only dataset import jobs that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeDatasetImportJob operation.
Deletes an Explainability resource.
You can delete only Explainability resources that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeExplainability operation.
Deletes an Explainability export job.
", + "DeleteExplainabilityExport": "Deletes an Explainability export.
", "DeleteForecast": "Deletes a forecast created using the CreateForecast operation. You can delete only forecasts that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeForecast operation.
You can't delete a forecast while it is being exported. After a forecast is deleted, you can no longer query the forecast.
", "DeleteForecastExportJob": "Deletes a forecast export job created using the CreateForecastExportJob operation. You can delete only export jobs that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribeForecastExportJob operation.
Deletes a predictor created using the CreatePredictor operation. You can delete only predictors that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribePredictor operation.
Deletes a predictor created using the CreateAutoPredictor or CreatePredictor operations. You can delete only predictors that have a status of ACTIVE
or CREATE_FAILED
. To get the status, use the DescribePredictor operation.
Deletes a predictor backtest export job.
", "DeleteResourceTree": "Deletes an entire resource tree. This operation will delete the parent resource and its child resources.
Child resources are resources that were created from another resource. For example, when a forecast is generated from a predictor, the forecast is the child resource and the predictor is the parent resource.
Amazon Forecast resources possess the following parent-child resource hierarchies:
Dataset: dataset import jobs
Dataset Group: predictors, predictor backtest export jobs, forecasts, forecast export jobs
Predictor: predictor backtest export jobs, forecasts, forecast export jobs
Forecast: forecast export jobs
DeleteResourceTree
will only delete Amazon Forecast resources, and will not delete datasets or exported files stored in Amazon S3.
Describes a predictor created using the CreateAutoPredictor operation.
", @@ -30,7 +30,7 @@ "DescribeExplainabilityExport": "Describes an Explainability export created using the CreateExplainabilityExport operation.
", "DescribeForecast": "Describes a forecast created using the CreateForecast operation.
In addition to listing the properties provided in the CreateForecast
request, this operation lists the following properties:
DatasetGroupArn
- The dataset group that provided the training data.
CreationTime
LastModificationTime
Status
Message
- If an error occurred, information about the error.
Describes a forecast export job created using the CreateForecastExportJob operation.
In addition to listing the properties provided by the user in the CreateForecastExportJob
request, this operation lists the following properties:
CreationTime
LastModificationTime
Status
Message
- If an error occurred, information about the error.
This operation is only valid for legacy predictors created with CreatePredictor. If you are not using a legacy predictor, use DescribeAutoPredictor.
To upgrade a legacy predictor to AutoPredictor, see Upgrading to AutoPredictor.
Describes a predictor created using the CreatePredictor operation.
In addition to listing the properties provided in the CreatePredictor
request, this operation lists the following properties:
DatasetImportJobArns
- The dataset import jobs used to import training data.
AutoMLAlgorithmArns
- If AutoML is performed, the algorithms that were evaluated.
CreationTime
LastModificationTime
Status
Message
- If an error occurred, information about the error.
This operation is only valid for legacy predictors created with CreatePredictor. If you are not using a legacy predictor, use DescribeAutoPredictor.
Describes a predictor created using the CreatePredictor operation.
In addition to listing the properties provided in the CreatePredictor
request, this operation lists the following properties:
DatasetImportJobArns
- The dataset import jobs used to import training data.
AutoMLAlgorithmArns
- If AutoML is performed, the algorithms that were evaluated.
CreationTime
LastModificationTime
Status
Message
- If an error occurred, information about the error.
Describes a predictor backtest export job created using the CreatePredictorBacktestExportJob operation.
In addition to listing the properties provided by the user in the CreatePredictorBacktestExportJob
request, this operation lists the following properties:
CreationTime
LastModificationTime
Status
Message
(if an error occurred)
Provides metrics on the accuracy of the models that were trained by the CreatePredictor operation. Use metrics to see how well the model performed and to decide whether to use the predictor to generate a forecast. For more information, see Predictor Metrics.
This operation generates metrics for each backtest window that was evaluated. The number of backtest windows (NumberOfBacktestWindows
) is specified using the EvaluationParameters object, which is optionally included in the CreatePredictor
request. If NumberOfBacktestWindows
isn't specified, the number defaults to one.
The parameters of the filling
method determine which items contribute to the metrics. If you want all items to contribute, specify zero
. If you want only those items that have complete data in the range being evaluated to contribute, specify nan
. For more information, see FeaturizationMethod.
Before you can get accuracy metrics, the Status
of the predictor must be ACTIVE
, signifying that training has completed. To get the status, use the DescribePredictor operation.
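A hedged sketch of retrieving the metrics with this SDK; the predictor ARN is hypothetical, and the result fields follow the EvaluationResult shape described above.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/forecastservice"
)

func main() {
	svc := forecastservice.New(session.Must(session.NewSession()))

	// The predictor must be ACTIVE before metrics are available.
	out, err := svc.GetAccuracyMetrics(&forecastservice.GetAccuracyMetricsInput{
		PredictorArn: aws.String("arn:aws:forecast:us-west-2:123456789012:predictor/demand"),
	})
	if err != nil {
		fmt.Println("GetAccuracyMetrics failed:", err)
		return
	}
	// One EvaluationResult per evaluated algorithm; each result holds the
	// metrics for every backtest window (NumberOfBacktestWindows).
	for _, result := range out.PredictorEvaluationResults {
		fmt.Println("algorithm:", aws.StringValue(result.AlgorithmArn),
			"windows:", len(result.TestWindows))
	}
}
```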
Returns a list of dataset groups created using the CreateDatasetGroup operation. For each dataset group, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the dataset group ARN with the DescribeDatasetGroup operation.
", @@ -41,9 +41,9 @@ "ListForecastExportJobs": "Returns a list of forecast export jobs created using the CreateForecastExportJob operation. For each forecast export job, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, use the ARN with the DescribeForecastExportJob operation. You can filter the list using an array of Filter objects.
", "ListForecasts": "Returns a list of forecasts created using the CreateForecast operation. For each forecast, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). To retrieve the complete set of properties, specify the ARN with the DescribeForecast operation. You can filter the list using an array of Filter objects.
", "ListPredictorBacktestExportJobs": "Returns a list of predictor backtest export jobs created using the CreatePredictorBacktestExportJob operation. This operation returns a summary for each backtest export job. You can filter the list using an array of Filter objects.
To retrieve the complete set of properties for a particular backtest export job, use the ARN with the DescribePredictorBacktestExportJob operation.
", - "ListPredictors": "Returns a list of predictors created using the CreatePredictor operation. For each predictor, this operation returns a summary of its properties, including its Amazon Resource Name (ARN). You can retrieve the complete set of properties by using the ARN with the DescribePredictor operation. You can filter the list using an array of Filter objects.
", + "ListPredictors": "Returns a list of predictors created using the CreateAutoPredictor or CreatePredictor operations. For each predictor, this operation returns a summary of its properties, including its Amazon Resource Name (ARN).
You can retrieve the complete set of properties by using the ARN with the DescribeAutoPredictor and DescribePredictor operations. You can filter the list using an array of Filter objects.
", "ListTagsForResource": "Lists the tags for an Amazon Forecast resource.
", - "StopResource": "Stops a resource.
The resource undergoes the following states: CREATE_STOPPING
and CREATE_STOPPED
. You cannot resume a resource once it has been stopped.
This operation can be applied to the following resources (and their corresponding child resources):
Dataset Import Job
Predictor Job
Forecast Job
Forecast Export Job
Predictor Backtest Export Job
Stops a resource.
The resource undergoes the following states: CREATE_STOPPING
and CREATE_STOPPED
. You cannot resume a resource once it has been stopped.
This operation can be applied to the following resources (and their corresponding child resources):
Dataset Import Job
Predictor Job
Forecast Job
Forecast Export Job
Predictor Backtest Export Job
Explainability Job
Explainability Export Job
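A minimal sketch of stopping one of the listed resource types, assuming the forecastservice client; the import job ARN is hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/forecastservice"
)

func main() {
	svc := forecastservice.New(session.Must(session.NewSession()))

	// Stopping is one-way: the job moves through CREATE_STOPPING to
	// CREATE_STOPPED and cannot be resumed.
	_, err := svc.StopResource(&forecastservice.StopResourceInput{
		ResourceArn: aws.String("arn:aws:forecast:us-west-2:123456789012:dataset-import-job/demand/import-1"),
	})
	if err != nil {
		fmt.Println("StopResource failed:", err)
		return
	}
	fmt.Println("stop requested")
}
```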
Associates the specified tags to a resource with the specified resourceArn
. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are also deleted.
Deletes the specified tags from a resource.
", "UpdateDatasetGroup": "Replaces the datasets in a dataset group with the specified datasets.
The Status
of the dataset group must be ACTIVE
before you can use the dataset group to create a predictor. Use the DescribeDatasetGroup operation to get the status.
The ARN of the IAM role that Amazon Forecast can assume to access the AWS KMS key.
Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an InvalidInputException
error.
The Amazon Resource Name (ARN) of the algorithm that was evaluated.
", "ExplainabilityExportSummary$ExplainabilityExportArn": "The Amazon Resource Name (ARN) of the Explainability export.
", - "ExplainabilityInfo$ExplainabilityArn": null, + "ExplainabilityInfo$ExplainabilityArn": "The Amazon Resource Name (ARN) of the Explainability.
", "ExplainabilitySummary$ExplainabilityArn": "The Amazon Resource Name (ARN) of the Explainability.
", "ExplainabilitySummary$ResourceArn": "The Amazon Resource Name (ARN) of the Predictor or Forecast used to create the Explainability.
", "Filter$Value": "The value to match.
", @@ -135,16 +135,16 @@ "ForecastSummary$ForecastArn": "The ARN of the forecast.
", "GetAccuracyMetricsRequest$PredictorArn": "The Amazon Resource Name (ARN) of the predictor to get metrics for.
", "InputDataConfig$DatasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group.
", - "ListTagsForResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.
", + "ListTagsForResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) that identifies the resource for which to list the tags.
", "PredictorBacktestExportJobSummary$PredictorBacktestExportJobArn": "The Amazon Resource Name (ARN) of the predictor backtest export job.
", "PredictorExecution$AlgorithmArn": "The ARN of the algorithm used to test the predictor.
", "PredictorSummary$PredictorArn": "The ARN of the predictor.
", "PredictorSummary$DatasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group that contains the data used to train the predictor.
", "ReferencePredictorSummary$Arn": "The ARN of the reference predictor.
", "S3Config$RoleArn": "The ARN of the AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the Amazon S3 bucket or files. If you provide a value for the KMSKeyArn
key, the role must allow access to the key.
Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an InvalidInputException
error.
The Amazon Resource Name (ARN) that identifies the resource to stop. The supported ARNs are DatasetImportJobArn
, PredictorArn
, PredictorBacktestExportJobArn
, ForecastArn
, and ForecastExportJobArn
.
The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast export jobs.
", - "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Forecast dataset groups, datasets, dataset import jobs, predictors, forecasts, and forecast exports.
", + "StopResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) that identifies the resource to stop. The supported ARNs are DatasetImportJobArn
, PredictorArn
, PredictorBacktestExportJobArn
, ForecastArn
, ForecastExportJobArn
, ExplainabilityArn
, and ExplainabilityExportArn
.
The Amazon Resource Name (ARN) that identifies the resource for which to list the tags.
", + "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) that identifies the resource for which to list the tags.
", "UpdateDatasetGroupRequest$DatasetGroupArn": "The ARN of the dataset group.
" } }, @@ -188,7 +188,7 @@ "Boolean": { "base": null, "refs": { - "CreateAutoPredictorRequest$ExplainPredictor": null, + "CreateAutoPredictorRequest$ExplainPredictor": "Create an Explainability resource for the predictor.
", "CreateExplainabilityRequest$EnableVisualization": "Create an Expainability visualization that is viewable within the AWS console.
", "CreatePredictorRequest$PerformAutoML": "Whether to perform AutoML. When Amazon Forecast performs AutoML, it evaluates the algorithms it provides and chooses the best algorithm and configuration for your training dataset.
The default value is false
. In this case, you are required to specify an algorithm.
Set PerformAutoML
to true
to have Amazon Forecast perform AutoML. This is a good option if you aren't sure which algorithm is suitable for your training data. In this case, PerformHPO
must be false.
Whether to perform hyperparameter optimization (HPO). HPO finds optimal hyperparameter values for your training data. The process of performing HPO is known as running a hyperparameter tuning job.
The default value is false
. In this case, Amazon Forecast uses default hyperparameter values from the chosen algorithm.
To override the default values, set PerformHPO
to true
and, optionally, supply the HyperParameterTuningJobConfig object. The tuning job specifies a metric to optimize, which hyperparameters participate in tuning, and the valid range for each tunable hyperparameter. In this case, you are required to specify an algorithm and PerformAutoML
must be false.
The following algorithms support HPO:
DeepAR+
CNN-QR
Whether the predictor is set to perform AutoML.
", "DescribePredictorResponse$PerformHPO": "Whether the predictor is set to perform hyperparameter optimization (HPO).
", "DescribePredictorResponse$IsAutoPredictor": "Whether the predictor was created with CreateAutoPredictor.
", - "ForecastSummary$CreatedUsingAutoPredictor": null, + "ForecastSummary$CreatedUsingAutoPredictor": "Whether the Forecast was created from an AutoPredictor.
", "GetAccuracyMetricsResponse$IsAutoPredictor": "Whether the predictor was created with CreateAutoPredictor.
", "PredictorSummary$IsAutoPredictor": "Whether AutoPredictor was used to create the predictor.
" } @@ -216,7 +216,7 @@ "Configuration": { "base": null, "refs": { - "AdditionalDataset$Configuration": "Weather Index
To enable the Weather Index, do not specify a value for Configuration
.
Holidays
To enable Holidays, specify a country with one of the following two-letter country codes:
\"AL\" - ALBANIA
\"AR\" - ARGENTINA
\"AT\" - AUSTRIA
\"AU\" - AUSTRALIA
\"BA\" - BOSNIA HERZEGOVINA
\"BE\" - BELGIUM
\"BG\" - BULGARIA
\"BO\" - BOLIVIA
\"BR\" - BRAZIL
\"BY\" - BELARUS
\"CA\" - CANADA
\"CL\" - CHILE
\"CO\" - COLOMBIA
\"CR\" - COSTA RICA
\"HR\" - CROATIA
\"CZ\" - CZECH REPUBLIC
\"DK\" - DENMARK
\"EC\" - ECUADOR
\"EE\" - ESTONIA
\"ET\" - ETHIOPIA
\"FI\" - FINLAND
\"FR\" - FRANCE
\"DE\" - GERMANY
\"GR\" - GREECE
\"HU\" - HUNGARY
\"IS\" - ICELAND
\"IN\" - INDIA
\"IE\" - IRELAND
\"IT\" - ITALY
\"JP\" - JAPAN
\"KZ\" - KAZAKHSTAN
\"KR\" - KOREA
\"LV\" - LATVIA
\"LI\" - LIECHTENSTEIN
\"LT\" - LITHUANIA
\"LU\" - LUXEMBOURG
\"MK\" - MACEDONIA
\"MT\" - MALTA
\"MX\" - MEXICO
\"MD\" - MOLDOVA
\"ME\" - MONTENEGRO
\"NL\" - NETHERLANDS
\"NZ\" - NEW ZEALAND
\"NI\" - NICARAGUA
\"NG\" - NIGERIA
\"NO\" - NORWAY
\"PA\" - PANAMA
\"PY\" - PARAGUAY
\"PE\" - PERU
\"PL\" - POLAND
\"PT\" - PORTUGAL
\"RO\" - ROMANIA
\"RU\" - RUSSIA
\"RS\" - SERBIA
\"SK\" - SLOVAKIA
\"SI\" - SLOVENIA
\"ZA\" - SOUTH AFRICA
\"ES\" - SPAIN
\"SE\" - SWEDEN
\"CH\" - SWITZERLAND
\"UA\" - UKRAINE
\"AE\" - UNITED ARAB EMIRATES
\"US\" - UNITED STATES
\"UK\" - UNITED KINGDOM
\"UY\" - URUGUAY
\"VE\" - VENEZUELA
Weather Index
To enable the Weather Index, do not specify a value for Configuration
.
Holidays
To enable Holidays, set CountryCode
to one of the following two-letter country codes:
\"AL\" - ALBANIA
\"AR\" - ARGENTINA
\"AT\" - AUSTRIA
\"AU\" - AUSTRALIA
\"BA\" - BOSNIA HERZEGOVINA
\"BE\" - BELGIUM
\"BG\" - BULGARIA
\"BO\" - BOLIVIA
\"BR\" - BRAZIL
\"BY\" - BELARUS
\"CA\" - CANADA
\"CL\" - CHILE
\"CO\" - COLOMBIA
\"CR\" - COSTA RICA
\"HR\" - CROATIA
\"CZ\" - CZECH REPUBLIC
\"DK\" - DENMARK
\"EC\" - ECUADOR
\"EE\" - ESTONIA
\"ET\" - ETHIOPIA
\"FI\" - FINLAND
\"FR\" - FRANCE
\"DE\" - GERMANY
\"GR\" - GREECE
\"HU\" - HUNGARY
\"IS\" - ICELAND
\"IN\" - INDIA
\"IE\" - IRELAND
\"IT\" - ITALY
\"JP\" - JAPAN
\"KZ\" - KAZAKHSTAN
\"KR\" - KOREA
\"LV\" - LATVIA
\"LI\" - LIECHTENSTEIN
\"LT\" - LITHUANIA
\"LU\" - LUXEMBOURG
\"MK\" - MACEDONIA
\"MT\" - MALTA
\"MX\" - MEXICO
\"MD\" - MOLDOVA
\"ME\" - MONTENEGRO
\"NL\" - NETHERLANDS
\"NZ\" - NEW ZEALAND
\"NI\" - NICARAGUA
\"NG\" - NIGERIA
\"NO\" - NORWAY
\"PA\" - PANAMA
\"PY\" - PARAGUAY
\"PE\" - PERU
\"PL\" - POLAND
\"PT\" - PORTUGAL
\"RO\" - ROMANIA
\"RU\" - RUSSIA
\"RS\" - SERBIA
\"SK\" - SLOVAKIA
\"SI\" - SLOVENIA
\"ZA\" - SOUTH AFRICA
\"ES\" - SPAIN
\"SE\" - SWEDEN
\"CH\" - SWITZERLAND
\"UA\" - UKRAINE
\"AE\" - UNITED ARAB EMIRATES
\"US\" - UNITED STATES
\"UK\" - UNITED KINGDOM
\"UY\" - URUGUAY
\"VE\" - VENEZUELA
The source of your training data, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the data and, optionally, an AWS Key Management Service (KMS) key. This object is submitted in the CreateDatasetImportJob request.
", + "base": "The source of your data, an AWS Identity and Access Management (IAM) role that allows Amazon Forecast to access the data and, optionally, an AWS Key Management Service (KMS) key.
", "refs": { "CreateDatasetImportJobRequest$DataSource": "The location of the training data to import and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.
If encryption is used, DataSource
must include an AWS Key Management Service (KMS) key and the IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in the EncryptionConfig
parameter of the CreateDataset operation.
Provides information about the Explainability resource.
", "refs": { - "DescribeAutoPredictorResponse$ExplainabilityInfo": null + "DescribeAutoPredictorResponse$ExplainabilityInfo": "Provides the status and ARN of the Predictor Explainability.
" } }, "ExplainabilitySummary": { @@ -684,13 +684,13 @@ } }, "Featurization": { - "base": "Provides featurization (transformation) information for a dataset field. This object is part of the FeaturizationConfig object.
For example:
{
\"AttributeName\": \"demand\",
FeaturizationPipeline [ {
\"FeaturizationMethodName\": \"filling\",
\"FeaturizationMethodParameters\": {\"aggregation\": \"avg\", \"backfill\": \"nan\"}
} ]
}
This object belongs to the CreatePredictor operation. If you created your predictor with CreateAutoPredictor, see AttributeConfig.
Provides featurization (transformation) information for a dataset field. This object is part of the FeaturizationConfig object.
For example:
{
\"AttributeName\": \"demand\",
FeaturizationPipeline [ {
\"FeaturizationMethodName\": \"filling\",
\"FeaturizationMethodParameters\": {\"aggregation\": \"avg\", \"backfill\": \"nan\"}
} ]
}
In a CreatePredictor operation, the specified algorithm trains a model using the specified dataset group. You can optionally tell the operation to modify data fields prior to training a model. These modifications are referred to as featurization.
You define featurization using the FeaturizationConfig
object. You specify an array of transformations, one for each field that you want to featurize. You then include the FeaturizationConfig
object in your CreatePredictor
request. Amazon Forecast applies the featurization to the TARGET_TIME_SERIES
and RELATED_TIME_SERIES
datasets before model training.
You can create multiple featurization configurations. For example, you might call the CreatePredictor
operation twice by specifying different featurization configurations.
This object belongs to the CreatePredictor operation. If you created your predictor with CreateAutoPredictor, see AttributeConfig.
In a CreatePredictor operation, the specified algorithm trains a model using the specified dataset group. You can optionally tell the operation to modify data fields prior to training a model. These modifications are referred to as featurization.
You define featurization using the FeaturizationConfig
object. You specify an array of transformations, one for each field that you want to featurize. You then include the FeaturizationConfig
object in your CreatePredictor
request. Amazon Forecast applies the featurization to the TARGET_TIME_SERIES
and RELATED_TIME_SERIES
datasets before model training.
You can create multiple featurization configurations. For example, you might call the CreatePredictor
operation twice by specifying different featurization configurations.
The featurization configuration.
", "DescribePredictorResponse$FeaturizationConfig": "The featurization configuration.
" @@ -748,8 +748,8 @@ "base": null, "refs": { "ListDatasetImportJobsRequest$Filters": "An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the datasets that match the statement from the list, respectively. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
. To include the datasets that match the statement, specify IS
. To exclude matching datasets, specify IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are DatasetArn
and Status
.
Value
- The value to match.
For example, to list all dataset import jobs whose status is ACTIVE, you specify the following filter:
\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the resources that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are PredictorArn
and Status
.
Value
- The value to match.
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude resources that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are PredictorArn
and Status
.
Value
- The value to match.
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the resources that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are ResourceArn
and Status
.
Value
- The value to match.
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude resources that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are ResourceArn
and Status
.
Value
- The value to match.
An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the forecast export jobs that match the statement from the list, respectively. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
. To include the forecast export jobs that match the statement, specify IS
. To exclude matching forecast export jobs, specify IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are ForecastArn
and Status
.
Value
- The value to match.
For example, to list all jobs that export a forecast named electricityforecast, specify the following filter:
\"Filters\": [ { \"Condition\": \"IS\", \"Key\": \"ForecastArn\", \"Value\": \"arn:aws:forecast:us-west-2:<acct-id>:forecast/electricityforecast\" } ]
An array of filters. For each filter, you provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the forecasts that match the statement from the list, respectively. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
. To include the forecasts that match the statement, specify IS
. To exclude matching forecasts, specify IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are DatasetGroupArn
, PredictorArn
, and Status
.
Value
- The value to match.
For example, to list all forecasts whose status is not ACTIVE, you would specify:
\"Filters\": [ { \"Condition\": \"IS_NOT\", \"Key\": \"Status\", \"Value\": \"ACTIVE\" } ]
An array of filters. For each filter, provide a condition and a match statement. The condition is either IS
or IS_NOT
, which specifies whether to include or exclude the predictor backtest export jobs that match the statement from the list. The match statement consists of a key and a value.
Filter properties
Condition
- The condition to apply. Valid values are IS
and IS_NOT
. To include the predictor backtest export jobs that match the statement, specify IS
. To exclude matching predictor backtest export jobs, specify IS_NOT
.
Key
- The name of the parameter to filter on. Valid values are PredictorArn
and Status
.
Value
- The value to match.
An array of dimension (field) names that specify how to group the generated forecast.
For example, if you are generating forecasts for item sales across all your stores, and your dataset contains a store_id
field, you would specify store_id
as a dimension to group sales forecasts for each store.
An array of dimension (field) names that specify the attributes used to group your time series.
", "FeaturizationConfig$ForecastDimensions": "An array of dimension (field) names that specify how to group the generated forecast.
For example, suppose that you are generating a forecast for item sales across all of your stores, and your dataset contains a store_id
field. If you want the sales forecast for each item by store, you would specify store_id
as the dimension.
All forecast dimensions specified in the TARGET_TIME_SERIES
dataset don't need to be specified in the CreatePredictor
request. All forecast dimensions specified in the RELATED_TIME_SERIES
dataset must be specified in the CreatePredictor
request.
The data used to train a predictor. The data includes a dataset group and any supplementary features. You specify this object in the CreatePredictor request.
", + "base": "This object belongs to the CreatePredictor operation. If you created your predictor with CreateAutoPredictor, see DataConfig.
The data used to train a predictor. The data includes a dataset group and any supplementary features. You specify this object in the CreatePredictor request.
", "refs": { "CreatePredictorRequest$InputDataConfig": "Describes the dataset group that contains the data to use to train the predictor.
", "DescribePredictorResponse$InputDataConfig": "Describes the dataset group that contains the data to use to train the predictor.
" @@ -1001,8 +1002,8 @@ "LocalDateTime": { "base": null, "refs": { - "CreateExplainabilityRequest$StartDateTime": "If TimePointGranularity
is set to SPECIFIC
, define the first point for the Explainability.
If TimePointGranularity
is set to SPECIFIC
, define the last time point for the Explainability.
If TimePointGranularity
is set to SPECIFIC
, define the first point for the Explainability.
Use the following timestamp format: yyyy-MM-ddTHH:mm:ss (example: 2015-01-01T20:00:00)
", + "CreateExplainabilityRequest$EndDateTime": "If TimePointGranularity
is set to SPECIFIC
, define the last time point for the Explainability.
Use the following timestamp format: yyyy-MM-ddTHH:mm:ss (example: 2015-01-01T20:00:00)
", "DescribeExplainabilityResponse$StartDateTime": "If TimePointGranularity
is set to SPECIFIC
, the first time point in the Explainability.
If TimePointGranularity
is set to SPECIFIC
, the last time point in the Explainability.
The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, and an AWS Identity and Access Management (IAM) role that Amazon Forecast can assume to access the file(s). Optionally, includes an AWS Key Management Service (KMS) key. This object is part of the DataSource object that is submitted in the CreateDatasetImportJob request, and part of the DataDestination object.
", "refs": { "DataDestination$S3Config": "The path to an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the bucket.
", - "DataSource$S3Config": "The path to the training data stored in an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the data.
" + "DataSource$S3Config": "The path to the data stored in an Amazon Simple Storage Service (Amazon S3) bucket along with the credentials to access the data.
" } }, "S3Path": { @@ -1244,7 +1245,7 @@ } }, "Schema": { - "base": "Defines the fields of a dataset. You specify this object in the CreateDataset request.
", + "base": "Defines the fields of a dataset.
", "refs": { "CreateDatasetRequest$Schema": "The schema for the dataset. The schema attributes and their order must match the fields in your data. The dataset Domain
and DatasetType
that you choose determine the minimum required fields in your training data. For information about the required fields for a specific dataset domain and type, see howitworks-domains-ds-types.
The status of the predictor backtest export job. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the predictor. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
The Status
of the predictor must be ACTIVE
before you can use the predictor to create a forecast.
The status of the Explainability export. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the Explainability. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the Explainability. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The status of the forecast export job. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The Status
of the forecast export job must be ACTIVE
before you can access the forecast in your S3 bucket.
The status of the forecast. States include:
ACTIVE
CREATE_PENDING
, CREATE_IN_PROGRESS
, CREATE_FAILED
CREATE_STOPPING
, CREATE_STOPPED
DELETE_PENDING
, DELETE_IN_PROGRESS
, DELETE_FAILED
The Status
of the forecast must be ACTIVE
before you can query or export the forecast.
Describes a supplementary feature of a dataset group. This object is part of the InputDataConfig object. Forecast supports the Weather Index and Holidays built-in featurizations.
Weather Index
The Amazon Forecast Weather Index is a built-in featurization that incorporates historical and projected weather information into your model. The Weather Index supplements your datasets with over two years of historical weather data and up to 14 days of projected weather data. For more information, see Amazon Forecast Weather Index.
Holidays
Holidays is a built-in featurization that incorporates a feature-engineered dataset of national holiday information into your model. It provides native support for the holiday calendars of 66 countries. To view the holiday calendars, refer to the Jollyday library. For more information, see Holidays Featurization.
", + "base": "This object belongs to the CreatePredictor operation. If you created your predictor with CreateAutoPredictor, see AdditionalDataset.
Describes a supplementary feature of a dataset group. This object is part of the InputDataConfig object. Forecast supports the Weather Index and Holidays built-in featurizations.
Weather Index
The Amazon Forecast Weather Index is a built-in featurization that incorporates historical and projected weather information into your model. The Weather Index supplements your datasets with over two years of historical weather data and up to 14 days of projected weather data. For more information, see Amazon Forecast Weather Index.
Holidays
Holidays is a built-in featurization that incorporates a feature-engineered dataset of national holiday information into your model. It provides native support for the holiday calendars of 66 countries. To view the holiday calendars, refer to the Jollyday library. For more information, see Holidays Featurization.
", "refs": { "SupplementaryFeatures$member": null } diff --git a/models/apis/imagebuilder/2019-12-02/api-2.json b/models/apis/imagebuilder/2019-12-02/api-2.json index 3f94b302173..e55da2d1515 100644 --- a/models/apis/imagebuilder/2019-12-02/api-2.json +++ b/models/apis/imagebuilder/2019-12-02/api-2.json @@ -516,6 +516,20 @@ {"shape":"InvalidParameterCombinationException"} ] }, + "ImportVmImage":{ + "name":"ImportVmImage", + "http":{ + "method":"PUT", + "requestUri":"/ImportVmImage" + }, + "input":{"shape":"ImportVmImageRequest"}, + "output":{"shape":"ImportVmImageResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ClientException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "ListComponentBuildVersions":{ "name":"ListComponentBuildVersions", "http":{ @@ -965,6 +979,14 @@ }, "Arn":{"type":"string"}, "Boolean":{"type":"boolean"}, + "BuildType":{ + "type":"string", + "enum":[ + "USER_INITIATED", + "SCHEDULED", + "IMPORT" + ] + }, "CallRateLimitExceededException":{ "type":"structure", "members":{ @@ -1603,6 +1625,14 @@ "infrastructureConfigurationArn":{"shape":"InfrastructureConfigurationArn"} } }, + "DiskImageFormat":{ + "type":"string", + "enum":[ + "VMDK", + "RAW", + "VHD" + ] + }, "Distribution":{ "type":"structure", "required":["region"], @@ -1611,7 +1641,8 @@ "amiDistributionConfiguration":{"shape":"AmiDistributionConfiguration"}, "containerDistributionConfiguration":{"shape":"ContainerDistributionConfiguration"}, "licenseConfigurationArns":{"shape":"LicenseConfigurationArnList"}, - "launchTemplateConfigurations":{"shape":"LaunchTemplateConfigurationList"} + "launchTemplateConfigurations":{"shape":"LaunchTemplateConfigurationList"}, + "s3ExportConfiguration":{"shape":"S3ExportConfiguration"} } }, "DistributionConfiguration":{ @@ -1974,7 +2005,8 @@ "imageTestsConfiguration":{"shape":"ImageTestsConfiguration"}, "dateCreated":{"shape":"DateTime"}, "outputResources":{"shape":"OutputResources"}, - "tags":{"shape":"TagMap"} + "tags":{"shape":"TagMap"}, + "buildType":{"shape":"BuildType"} } }, "ImageBuildVersionArn":{ @@ -2101,7 +2133,8 @@ "owner":{"shape":"NonEmptyString"}, "dateCreated":{"shape":"DateTime"}, "outputResources":{"shape":"OutputResources"}, - "tags":{"shape":"TagMap"} + "tags":{"shape":"TagMap"}, + "buildType":{"shape":"BuildType"} } }, "ImageSummaryList":{ @@ -2137,7 +2170,8 @@ "platform":{"shape":"Platform"}, "osVersion":{"shape":"OsVersion"}, "owner":{"shape":"NonEmptyString"}, - "dateCreated":{"shape":"DateTime"} + "dateCreated":{"shape":"DateTime"}, + "buildType":{"shape":"BuildType"} } }, "ImageVersionArn":{ @@ -2188,6 +2222,37 @@ "componentBuildVersionArn":{"shape":"ComponentBuildVersionArn"} } }, + "ImportVmImageRequest":{ + "type":"structure", + "required":[ + "name", + "semanticVersion", + "platform", + "vmImportTaskId", + "clientToken" + ], + "members":{ + "name":{"shape":"NonEmptyString"}, + "semanticVersion":{"shape":"VersionNumber"}, + "description":{"shape":"NonEmptyString"}, + "platform":{"shape":"Platform"}, + "osVersion":{"shape":"OsVersion"}, + "vmImportTaskId":{"shape":"NonEmptyString"}, + "tags":{"shape":"TagMap"}, + "clientToken":{ + "shape":"ClientToken", + "idempotencyToken":true + } + } + }, + "ImportVmImageResponse":{ + "type":"structure", + "members":{ + "requestId":{"shape":"NonEmptyString"}, + "imageArn":{"shape":"Arn"}, + "clientToken":{"shape":"ClientToken"} + } + }, "InfrastructureConfiguration":{ "type":"structure", "members":{ @@ -2818,6 +2883,20 @@ "max":25, "min":1 }, + "S3ExportConfiguration":{ + "type":"structure", + 
"required":[ + "roleName", + "diskImageFormat", + "s3Bucket" + ], + "members":{ + "roleName":{"shape":"NonEmptyString"}, + "diskImageFormat":{"shape":"DiskImageFormat"}, + "s3Bucket":{"shape":"NonEmptyString"}, + "s3Prefix":{"shape":"NonEmptyString"} + } + }, "S3Logs":{ "type":"structure", "members":{ diff --git a/models/apis/imagebuilder/2019-12-02/docs-2.json b/models/apis/imagebuilder/2019-12-02/docs-2.json index eee1d7c2837..de9660d1991 100644 --- a/models/apis/imagebuilder/2019-12-02/docs-2.json +++ b/models/apis/imagebuilder/2019-12-02/docs-2.json @@ -29,6 +29,7 @@ "GetImageRecipePolicy": "Gets an image recipe policy.
", "GetInfrastructureConfiguration": "Gets an infrastructure configuration.
", "ImportComponent": "Imports a component and transforms its data into a component document.
", + "ImportVmImage": "When you export your virtual machine (VM) from its virtualization environment, that process creates a set of one or more disk container files that act as snapshots of your VM’s environment, settings, and data. The Amazon EC2 API ImportImage action uses those files to import your VM and create an AMI. To import using the CLI command, see import-image
You can reference the task ID from the VM import to pull in the AMI that the import created as the base image for your Image Builder recipe.
", "ListComponentBuildVersions": "Returns the list of component build versions for the specified semantic version.
The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.
Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.
Returns the list of component build versions for the specified semantic version.
The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.
Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.
Returns a list of container recipes.
", @@ -105,7 +106,8 @@ "ImagePipeline$imageRecipeArn": "The Amazon Resource Name (ARN) of the image recipe associated with this image pipeline.
", "ImagePipeline$containerRecipeArn": "The Amazon Resource Name (ARN) of the container recipe that is used for this pipeline.
", "ImagePipeline$infrastructureConfigurationArn": "The Amazon Resource Name (ARN) of the infrastructure configuration associated with this image pipeline.
", - "ImagePipeline$distributionConfigurationArn": "The Amazon Resource Name (ARN) of the distribution configuration associated with this image pipeline.
" + "ImagePipeline$distributionConfigurationArn": "The Amazon Resource Name (ARN) of the distribution configuration associated with this image pipeline.
", + "ImportVmImageResponse$imageArn": "The Amazon Resource Name (ARN) of the AMI that was created during the VM import process. This AMI is used as the base image for the recipe that imported the VM.
" } }, "Boolean": { @@ -116,6 +118,14 @@ "ListImagesRequest$byName": "Requests a list of images with a specific recipe name.
" } }, + "BuildType": { + "base": null, + "refs": { + "Image$buildType": "Indicates the type of build that created this image. The build can be initiated in the following ways:
USER_INITIATED – A manual pipeline build request.
SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
IMPORT – A VM import created the image to use as the base image for the recipe.
Indicates the type of build that created this image. The build can be initiated in the following ways:
USER_INITIATED – A manual pipeline build request.
SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
IMPORT – A VM import created the image to use as the base image for the recipe.
Indicates the type of build that created this image. The build can be initiated in the following ways:
USER_INITIATED – A manual pipeline build request.
SCHEDULED – A pipeline build initiated by a cron expression in the Image Builder pipeline, or from EventBridge.
IMPORT – A VM import created the image to use as the base image for the recipe.
You have exceeded the permitted request rate for the specific operation.
", "refs": { @@ -139,8 +149,8 @@ "ClientToken": { "base": null, "refs": { - "CancelImageCreationRequest$clientToken": "The idempotency token used to make this request idempotent.
", - "CancelImageCreationResponse$clientToken": "The idempotency token used to make this request idempotent.
", + "CancelImageCreationRequest$clientToken": "Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference.
", + "CancelImageCreationResponse$clientToken": "The idempotency token that was used for this request.
", "CreateComponentRequest$clientToken": "The idempotency token of the component.
", "CreateComponentResponse$clientToken": "The idempotency token used to make this request idempotent.
", "CreateContainerRecipeRequest$clientToken": "The client token used to make this request idempotent.
", @@ -157,6 +167,8 @@ "CreateInfrastructureConfigurationResponse$clientToken": "The idempotency token used to make this request idempotent.
", "ImportComponentRequest$clientToken": "The idempotency token of the component.
", "ImportComponentResponse$clientToken": "The idempotency token used to make this request idempotent.
", + "ImportVmImageRequest$clientToken": "Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference.
", + "ImportVmImageResponse$clientToken": "The idempotency token that was used for this request.
", "StartImagePipelineExecutionRequest$clientToken": "The idempotency token used to make this request idempotent.
", "StartImagePipelineExecutionResponse$clientToken": "The idempotency token used to make this request idempotent.
", "UpdateDistributionConfigurationRequest$clientToken": "The idempotency token of the distribution configuration.
", @@ -560,6 +572,12 @@ "refs": { } }, + "DiskImageFormat": { + "base": null, + "refs": { + "S3ExportConfiguration$diskImageFormat": "Export the updated image to one of the following supported disk image formats:
Virtual Hard Disk (VHD) – Compatible with Citrix Xen and Microsoft Hyper-V virtualization products.
Stream-optimized ESX Virtual Machine Disk (VMDK) – Compatible with VMware ESX and VMware vSphere versions 4, 5, and 6.
Raw – Raw format.
Defines the settings for a specific Region.
", "refs": { @@ -991,7 +1009,7 @@ } }, "ImageTestsConfiguration": { - "base": "Image tests configuration.
", + "base": "Configure image tests for your pipeline build. Tests run after building the image, to verify that the AMI or container image is valid before distributing it.
", "refs": { "CreateImagePipelineRequest$imageTestsConfiguration": "The image test configuration of the image pipeline.
", "CreateImageRequest$imageTestsConfiguration": "The image tests configuration of the image.
", @@ -1049,6 +1067,16 @@ "refs": { } }, + "ImportVmImageRequest": { + "base": null, + "refs": { + } + }, + "ImportVmImageResponse": { + "base": null, + "refs": { + } + }, "InfrastructureConfiguration": { "base": "Details of the infrastructure configuration.
", "refs": { @@ -1434,6 +1462,10 @@ "ImportComponentRequest$data": "The data of the component. Used to specify the data inline. Either data
or uri
can be used to specify the data within the component.
The ID of the KMS key that should be used to encrypt this component.
", "ImportComponentResponse$requestId": "The request ID that uniquely identifies this request.
", + "ImportVmImageRequest$name": "The name of the base image that is created by the import process.
", + "ImportVmImageRequest$description": "The description for the base image that is created by the import process.
", + "ImportVmImageRequest$vmImportTaskId": "The importTaskId
(API) or ImportTaskId
(CLI) from the Amazon EC2 VM import process. Image Builder retrieves information from the import process to pull in the AMI that is created from the VM source as the base image for your recipe.
The request ID that uniquely identifies this request.
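The new ImportVmImage request/response pair above maps a completed EC2 VM import task onto an Image Builder base image. A minimal Go sketch, assuming the generated client follows the usual aws-sdk-go naming (ImportVmImageInput with exported Name, SemanticVersion, Platform, OsVersion, VmImportTaskId, and ClientToken fields); all identifier values are hypothetical placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/imagebuilder"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := imagebuilder.New(sess)

	// Pull in the AMI produced by an EC2 VM import task as a new base image.
	// The import task ID below is a hypothetical placeholder.
	out, err := svc.ImportVmImage(&imagebuilder.ImportVmImageInput{
		Name:            aws.String("imported-vm-base"),
		SemanticVersion: aws.String("1.0.0"),
		Platform:        aws.String("Linux"),
		OsVersion:       aws.String("Amazon Linux 2"),
		VmImportTaskId:  aws.String("import-ami-0123456789abcdef0"),
		// Reusing the same client token makes retries of this request idempotent.
		ClientToken: aws.String("import-vm-2021-12-20-attempt-1"),
	})
	if err != nil {
		fmt.Println("ImportVmImage failed:", err)
		return
	}
	fmt.Println("client token used:", aws.StringValue(out.ClientToken))
}
```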
", "InfrastructureConfiguration$description": "The description of the infrastructure configuration.
", "InfrastructureConfiguration$subnetId": "The subnet ID of the infrastructure configuration.
", "InfrastructureConfiguration$keyPair": "The Amazon EC2 key pair of the infrastructure configuration.
", @@ -1460,8 +1492,11 @@ "PutImagePolicyResponse$requestId": "The request ID that uniquely identifies this request.
", "PutImageRecipePolicyResponse$requestId": "The request ID that uniquely identifies this request.
", "RegionList$member": null, - "S3Logs$s3BucketName": "The Amazon S3 bucket in which to store the logs.
", - "S3Logs$s3KeyPrefix": "The Amazon S3 path in which to store the logs.
", + "S3ExportConfiguration$roleName": "The name of the role that grants VM Import/Export permission to export images to your S3 bucket.
", + "S3ExportConfiguration$s3Bucket": "The S3 bucket in which to store the output disk images for your VM.
", + "S3ExportConfiguration$s3Prefix": "The Amazon S3 path for the bucket where the output disk images for your VM are stored.
", + "S3Logs$s3BucketName": "The S3 bucket in which to store the logs.
", + "S3Logs$s3KeyPrefix": "The Amazon S3 path to the bucket where the logs are stored.
", "Schedule$scheduleExpression": "The cron expression determines how often EC2 Image Builder evaluates your pipelineExecutionStartCondition
.
For information on how to format a cron expression in Image Builder, see Use cron expressions in EC2 Image Builder.
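A short sketch of a pipeline schedule using a cron expression, assuming the generated Schedule type and the EXPRESSION_MATCH_ONLY start condition; the exact expression format is covered by the linked cron documentation, so the expression below is only an illustrative assumption:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/imagebuilder"
)

// dailySchedule sketches a pipeline schedule that Image Builder evaluates
// once a day; the cron expression here is a hypothetical example.
func dailySchedule() *imagebuilder.Schedule {
	return &imagebuilder.Schedule{
		ScheduleExpression:              aws.String("cron(0 0 * * ? *)"),
		PipelineExecutionStartCondition: aws.String("EXPRESSION_MATCH_ONLY"),
	}
}
```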
", "SecurityGroupIds$member": null, "StartImagePipelineExecutionResponse$requestId": "The request ID that uniquely identifies this request.
", @@ -1489,7 +1524,7 @@ "EbsInstanceBlockDeviceSpecification$deleteOnTermination": "Use to configure delete on termination of the associated device.
", "Image$enhancedImageMetadataEnabled": "Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.
", "ImagePipeline$enhancedImageMetadataEnabled": "Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.
", - "ImageTestsConfiguration$imageTestsEnabled": "Defines if tests should be executed when building this image.
", + "ImageTestsConfiguration$imageTestsEnabled": "Determines if tests should run after building the image. Image Builder defaults to enable tests to run following the image build, before image distribution.
", "InfrastructureConfiguration$terminateInstanceOnFailure": "The terminate instance on failure configuration of the infrastructure configuration.
", "ListImagesRequest$includeDeprecated": "Includes deprecated images in the response list.
", "SystemsManagerAgent$uninstallAfterBuild": "Controls whether the Systems Manager agent is removed from your final build image, prior to creating the new AMI. If this is set to true, then the agent is removed from the final image. If it's set to false, then the agent is left in, so that it is included in the new AMI. The default value is false.
", @@ -1527,6 +1562,7 @@ "Image$osVersion": "The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.
", "ImageSummary$osVersion": "The operating system version of the instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.
", "ImageVersion$osVersion": "The operating system version of the Amazon EC2 build instance. For example, Amazon Linux 2, Ubuntu 18, or Microsoft Windows Server 2019.
", + "ImportVmImageRequest$osVersion": "The operating system version for the imported VM.
", "OsVersionList$member": null } }, @@ -1610,7 +1646,8 @@ "ImageRecipeSummary$platform": "The platform of the image recipe.
", "ImageSummary$platform": "The platform of the image.
", "ImageVersion$platform": "The platform of the image version, for example \"Windows\" or \"Linux\".
", - "ImportComponentRequest$platform": "The platform of the component.
" + "ImportComponentRequest$platform": "The platform of the component.
", + "ImportVmImageRequest$platform": "The operating system platform for the imported VM.
" } }, "PutComponentPolicyRequest": { @@ -1745,6 +1782,12 @@ "ListInfrastructureConfigurationsRequest$maxResults": "The maximum items to return in a request.
" } }, + "S3ExportConfiguration": { + "base": "Properties that configure export from your build instance to a compatible file format for your VM.
", + "refs": { + "Distribution$s3ExportConfiguration": "Configure export settings to deliver disk images created from your image build, using a file format that is compatible with your VMs in that Region.
" + } + }, "S3Logs": { "base": "Amazon S3 logging configuration.
", "refs": { @@ -1850,6 +1893,7 @@ "ImageRecipeSummary$tags": "The tags of the image recipe.
", "ImageSummary$tags": "The tags of the image.
", "ImportComponentRequest$tags": "The tags of the component.
", + "ImportVmImageRequest$tags": "Tags that are attached to the import resources.
", "InfrastructureConfiguration$tags": "The tags of the infrastructure configuration.
", "InfrastructureConfigurationSummary$tags": "The tags of the infrastructure configuration.
", "ListTagsForResourceResponse$tags": "The tags for the specified resource.
", @@ -1955,7 +1999,8 @@ "ImageRecipe$version": "The version of the image recipe.
", "ImageSummary$version": "The version of the image.
", "ImageVersion$version": "Details for a specific version of an Image Builder image. This version follows the semantic version syntax.
The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.
Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.
Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.
Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.
The semantic version of the component. This version follows the semantic version syntax.
The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.
Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.
The semantic version of the component. This version follows the semantic version syntax.
The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.
Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.
The semantic version to attach to the base image that was created during the import process. This version follows the semantic version syntax.
The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.
Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.
Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.
Specifies the pricing plan for the geofence collection.
For additional details and restrictions on each pricing plan option, see the Amazon Location Service pricing page.
", - "CreateMapRequest$PricingPlan": "Specifies the pricing plan for your map resource.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
", - "CreatePlaceIndexRequest$PricingPlan": "Specifies the pricing plan for your place index resource.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
", - "CreateRouteCalculatorRequest$PricingPlan": "Specifies the pricing plan for your route calculator resource.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
", - "CreateTrackerRequest$PricingPlan": "Specifies the pricing plan for the tracker resource.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
", + "CreateGeofenceCollectionRequest$PricingPlan": "Optionally specifies the pricing plan for the geofence collection. Defaults to RequestBasedUsage
.
For additional details and restrictions on each pricing plan option, see the Amazon Location Service pricing page.
", + "CreateMapRequest$PricingPlan": "Optionally specifies the pricing plan for the map resource. Defaults to RequestBasedUsage
.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
", + "CreatePlaceIndexRequest$PricingPlan": "Optionally specifies the pricing plan for the place index resource. Defaults to RequestBasedUsage
.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
", + "CreateRouteCalculatorRequest$PricingPlan": "Optionally specifies the pricing plan for the route calculator resource. Defaults to RequestBasedUsage
.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
", + "CreateTrackerRequest$PricingPlan": "Optionally specifies the pricing plan for the tracker resource. Defaults to RequestBasedUsage
.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
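Since PricingPlan is now optional on all five Create* requests and defaults to RequestBasedUsage, a resource can be created without it. A minimal sketch with the Go client, assuming the usual generated names (CreateMapInput, MapConfiguration); the map name and style are hypothetical placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/locationservice"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := locationservice.New(sess)

	// PricingPlan is omitted, so the resource defaults to RequestBasedUsage.
	out, err := svc.CreateMap(&locationservice.CreateMapInput{
		MapName: aws.String("example-map"),
		Configuration: &locationservice.MapConfiguration{
			Style: aws.String("VectorEsriStreets"),
		},
	})
	if err != nil {
		fmt.Println("CreateMap failed:", err)
		return
	}
	fmt.Println("map ARN:", aws.StringValue(out.MapArn))
}
```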
", "DescribeGeofenceCollectionResponse$PricingPlan": "The pricing plan selected for the specified geofence collection.
For additional details and restrictions on each pricing plan option, see the Amazon Location Service pricing page.
", "DescribeMapResponse$PricingPlan": "The pricing plan selected for the specified map resource.
<p>For additional details and restrictions on each pricing plan option, see <a href="https://aws.amazon.com/location/pricing/">Amazon Location Service pricing</a>.</p>
",
"DescribePlaceIndexResponse$PricingPlan": "The pricing plan selected for the specified place index resource.
For additional details and restrictions on each pricing plan option, see Amazon Location Service pricing.
", diff --git a/models/apis/redshift/2012-12-01/api-2.json b/models/apis/redshift/2012-12-01/api-2.json index 7d23edf5afc..85fdaf76fca 100644 --- a/models/apis/redshift/2012-12-01/api-2.json +++ b/models/apis/redshift/2012-12-01/api-2.json @@ -3255,7 +3255,8 @@ "DataShareArn":{"shape":"String"}, "ProducerArn":{"shape":"String"}, "AllowPubliclyAccessibleConsumers":{"shape":"Boolean"}, - "DataShareAssociations":{"shape":"DataShareAssociationList"} + "DataShareAssociations":{"shape":"DataShareAssociationList"}, + "ManagedBy":{"shape":"String"} } }, "DataShareAssociation":{ diff --git a/models/apis/redshift/2012-12-01/docs-2.json b/models/apis/redshift/2012-12-01/docs-2.json index af25fa4f5f7..507e6667574 100644 --- a/models/apis/redshift/2012-12-01/docs-2.json +++ b/models/apis/redshift/2012-12-01/docs-2.json @@ -6,7 +6,7 @@ "AddPartner": "Adds a partner integration to a cluster. This operation authorizes a partner to push status updates for the specified database. To complete the integration, you also set up the integration on the partner website.
", "AssociateDataShareConsumer": "From a datashare consumer account, associates a datashare with the account (AssociateEntireAccount) or the specified namespace (ConsumerArn). If you make this association, the consumer can consume the datashare.
", "AuthorizeClusterSecurityGroupIngress": "Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an Amazon EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.
If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift cluster must be in the same Amazon Web Services Region.
If you authorize access to a CIDR/IP address range, specify CIDRIP. For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing.
You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide.
", - "AuthorizeDataShare": "From a data producer account, authorizes the sharing of a datashare with one or more consumer accounts. To authorize a datashare for a data consumer, the producer account must have the correct access privileges.
", + "AuthorizeDataShare": "From a data producer account, authorizes the sharing of a datashare with one or more consumer accounts or managing entities. To authorize a datashare for a data consumer, the producer account must have the correct access privileges.
", "AuthorizeEndpointAccess": "Grants access to a cluster.
", "AuthorizeSnapshotAccess": "Authorizes the specified Amazon Web Services account to restore the specified snapshot.
For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.
", "BatchDeleteClusterSnapshots": "Deletes a set of cluster snapshots.
", @@ -2904,7 +2904,7 @@ "AuthorizeClusterSecurityGroupIngressMessage$EC2SecurityGroupName": "The EC2 security group to be added the Amazon Redshift security group.
", "AuthorizeClusterSecurityGroupIngressMessage$EC2SecurityGroupOwnerId": "The Amazon Web Services account number of the owner of the security group specified by the EC2SecurityGroupName parameter. The Amazon Web Services Access Key ID is not an acceptable value.
Example: 111122223333
The Amazon Resource Name (ARN) of the datashare that producers are to authorize sharing for.
", - "AuthorizeDataShareMessage$ConsumerIdentifier": "The identifier of the data consumer that is authorized to access the datashare. This identifier is an Amazon Web Services account ID.
", + "AuthorizeDataShareMessage$ConsumerIdentifier": "The identifier of the data consumer that is authorized to access the datashare. This identifier is an Amazon Web Services account ID or a keyword, such as ADX.
", "AuthorizeEndpointAccessMessage$ClusterIdentifier": "The cluster identifier of the cluster to grant access to.
", "AuthorizeEndpointAccessMessage$Account": "The Amazon Web Services account ID to grant access to.
", "AuthorizeSnapshotAccessMessage$SnapshotIdentifier": "The identifier of the snapshot the account is authorized to restore.
", @@ -3034,11 +3034,12 @@ "CreateUsageLimitMessage$ClusterIdentifier": "The identifier of the cluster that you want to limit usage.
", "DataShare$DataShareArn": "An Amazon Resource Name (ARN) that references the datashare that is owned by a specific namespace of the producer cluster. A datashare ARN is in the arn:aws:redshift:{region}:{account-id}:{datashare}:{namespace-guid}/{datashare-name}
format.
The Amazon Resource Name (ARN) of the producer.
", + "DataShare$ManagedBy": "The identifier of a datashare to show its managing entity.
", "DataShareAssociation$ConsumerIdentifier": "The name of the consumer accounts that have an association with a producer datashare.
", "DataTransferProgress$Status": "Describes the status of the cluster. While the transfer is in progress the status is transferringdata
.
The Amazon Resource Name (ARN) of the datashare to remove authorization from.
", - "DeauthorizeDataShareMessage$ConsumerIdentifier": "The identifier of the data consumer that is to have authorization removed from the datashare. This identifier is an Amazon Web Services account ID.
", + "DeauthorizeDataShareMessage$ConsumerIdentifier": "The identifier of the data consumer that is to have authorization removed from the datashare. This identifier is an Amazon Web Services account ID or a keyword, such as ADX.
", "DefaultClusterParameters$ParameterGroupFamily": "The name of the cluster parameter group family to which the engine default parameters apply.
", "DefaultClusterParameters$Marker": "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker
parameter and retrying the command. If the Marker
field is empty, all response records have been retrieved for the request.
A unique identifier for the maintenance window.
", diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 2e4859327ac..a353b7e1650 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -3486,7 +3486,8 @@ "members":{ "DataSource":{"shape":"AutoMLDataSource"}, "CompressionType":{"shape":"CompressionType"}, - "TargetAttributeName":{"shape":"TargetAttributeName"} + "TargetAttributeName":{"shape":"TargetAttributeName"}, + "ContentType":{"shape":"ContentType"} } }, "AutoMLContainerDefinition":{ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 55955e78437..c93f3baef07 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -1714,6 +1714,7 @@ "ContentType": { "base": null, "refs": { + "AutoMLChannel$ContentType": "The content type of the data from the input source. You can use text/csv;header=present
or x-application/vnd.amazon+parquet
. The default value is text/csv;header=present
.
The MIME type of the data.
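A short sketch of the new ContentType field on an AutoML input channel, assuming the usual generated Go types; the S3 URI and target attribute are hypothetical placeholders:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// parquetChannel sketches an AutoML input channel that overrides the default
// text/csv;header=present content type to read Parquet input.
func parquetChannel() *sagemaker.AutoMLChannel {
	return &sagemaker.AutoMLChannel{
		DataSource: &sagemaker.AutoMLDataSource{
			S3DataSource: &sagemaker.AutoMLS3DataSource{
				S3DataType: aws.String("S3Prefix"),
				S3Uri:      aws.String("s3://example-bucket/automl/input/"),
			},
		},
		TargetAttributeName: aws.String("label"),
		ContentType:         aws.String("x-application/vnd.amazon+parquet"),
	}
}
```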
", "ContentTypes$member": null, "FileSource$ContentType": "The type of content stored in the file source.
", diff --git a/models/apis/securityhub/2018-10-26/api-2.json b/models/apis/securityhub/2018-10-26/api-2.json index 9e80afe3790..aa68fbdbe42 100644 --- a/models/apis/securityhub/2018-10-26/api-2.json +++ b/models/apis/securityhub/2018-10-26/api-2.json @@ -1192,6 +1192,16 @@ "ApiGatewayManaged":{"shape":"Boolean"} } }, + "AwsAutoScalingAutoScalingGroupAvailabilityZonesList":{ + "type":"list", + "member":{"shape":"AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails"} + }, + "AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails":{ + "type":"structure", + "members":{ + "Value":{"shape":"NonEmptyString"} + } + }, "AwsAutoScalingAutoScalingGroupDetails":{ "type":"structure", "members":{ @@ -1199,7 +1209,53 @@ "LoadBalancerNames":{"shape":"StringList"}, "HealthCheckType":{"shape":"NonEmptyString"}, "HealthCheckGracePeriod":{"shape":"Integer"}, - "CreatedTime":{"shape":"NonEmptyString"} + "CreatedTime":{"shape":"NonEmptyString"}, + "MixedInstancesPolicy":{"shape":"AwsAutoScalingAutoScalingGroupMixedInstancesPolicyDetails"}, + "AvailabilityZones":{"shape":"AwsAutoScalingAutoScalingGroupAvailabilityZonesList"} + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyDetails":{ + "type":"structure", + "members":{ + "InstancesDistribution":{"shape":"AwsAutoScalingAutoScalingGroupMixedInstancesPolicyInstancesDistributionDetails"}, + "LaunchTemplate":{"shape":"AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateDetails"} + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyInstancesDistributionDetails":{ + "type":"structure", + "members":{ + "OnDemandAllocationStrategy":{"shape":"NonEmptyString"}, + "OnDemandBaseCapacity":{"shape":"Integer"}, + "OnDemandPercentageAboveBaseCapacity":{"shape":"Integer"}, + "SpotAllocationStrategy":{"shape":"NonEmptyString"}, + "SpotInstancePools":{"shape":"Integer"}, + "SpotMaxPrice":{"shape":"NonEmptyString"} + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateDetails":{ + "type":"structure", + "members":{ + "LaunchTemplateSpecification":{"shape":"AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateLaunchTemplateSpecification"}, + "Overrides":{"shape":"AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesList"} + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateLaunchTemplateSpecification":{ + "type":"structure", + "members":{ + "LaunchTemplateId":{"shape":"NonEmptyString"}, + "LaunchTemplateName":{"shape":"NonEmptyString"}, + "Version":{"shape":"NonEmptyString"} + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesList":{ + "type":"list", + "member":{"shape":"AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesListDetails"} + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesListDetails":{ + "type":"structure", + "members":{ + "InstanceType":{"shape":"NonEmptyString"}, + "WeightedCapacity":{"shape":"NonEmptyString"} } }, "AwsAutoScalingLaunchConfigurationBlockDeviceMappingsDetails":{ @@ -1246,7 +1302,8 @@ "RamdiskId":{"shape":"NonEmptyString"}, "SecurityGroups":{"shape":"NonEmptyStringList"}, "SpotPrice":{"shape":"NonEmptyString"}, - "UserData":{"shape":"NonEmptyString"} + "UserData":{"shape":"NonEmptyString"}, + "MetadataOptions":{"shape":"AwsAutoScalingLaunchConfigurationMetadataOptions"} } }, "AwsAutoScalingLaunchConfigurationInstanceMonitoringDetails":{ @@ -1255,6 +1312,14 @@ "Enabled":{"shape":"Boolean"} } }, + "AwsAutoScalingLaunchConfigurationMetadataOptions":{ + 
"type":"structure", + "members":{ + "HttpEndpoint":{"shape":"NonEmptyString"}, + "HttpPutResponseHopLimit":{"shape":"Integer"}, + "HttpTokens":{"shape":"NonEmptyString"} + } + }, "AwsCertificateManagerCertificateDetails":{ "type":"structure", "members":{ @@ -3344,6 +3409,53 @@ } }, "AwsLambdaLayerVersionNumber":{"type":"long"}, + "AwsNetworkFirewallFirewallDetails":{ + "type":"structure", + "members":{ + "DeleteProtection":{"shape":"Boolean"}, + "Description":{"shape":"NonEmptyString"}, + "FirewallArn":{"shape":"NonEmptyString"}, + "FirewallId":{"shape":"NonEmptyString"}, + "FirewallName":{"shape":"NonEmptyString"}, + "FirewallPolicyArn":{"shape":"NonEmptyString"}, + "FirewallPolicyChangeProtection":{"shape":"Boolean"}, + "SubnetChangeProtection":{"shape":"Boolean"}, + "SubnetMappings":{"shape":"AwsNetworkFirewallFirewallSubnetMappingsList"}, + "VpcId":{"shape":"NonEmptyString"} + } + }, + "AwsNetworkFirewallFirewallPolicyDetails":{ + "type":"structure", + "members":{ + "FirewallPolicy":{"shape":"FirewallPolicyDetails"}, + "FirewallPolicyArn":{"shape":"NonEmptyString"}, + "FirewallPolicyId":{"shape":"NonEmptyString"}, + "FirewallPolicyName":{"shape":"NonEmptyString"}, + "Description":{"shape":"NonEmptyString"} + } + }, + "AwsNetworkFirewallFirewallSubnetMappingsDetails":{ + "type":"structure", + "members":{ + "SubnetId":{"shape":"NonEmptyString"} + } + }, + "AwsNetworkFirewallFirewallSubnetMappingsList":{ + "type":"list", + "member":{"shape":"AwsNetworkFirewallFirewallSubnetMappingsDetails"} + }, + "AwsNetworkFirewallRuleGroupDetails":{ + "type":"structure", + "members":{ + "Capacity":{"shape":"Integer"}, + "Description":{"shape":"NonEmptyString"}, + "RuleGroup":{"shape":"RuleGroupDetails"}, + "RuleGroupArn":{"shape":"NonEmptyString"}, + "RuleGroupId":{"shape":"NonEmptyString"}, + "RuleGroupName":{"shape":"NonEmptyString"}, + "Type":{"shape":"NonEmptyString"} + } + }, "AwsOpenSearchServiceDomainClusterConfigDetails":{ "type":"structure", "members":{ @@ -4096,6 +4208,13 @@ "type":"list", "member":{"shape":"AwsS3BucketBucketLifecycleConfigurationRulesTransitionsDetails"} }, + "AwsS3BucketBucketVersioningConfiguration":{ + "type":"structure", + "members":{ + "IsMfaDeleteEnabled":{"shape":"Boolean"}, + "Status":{"shape":"NonEmptyString"} + } + }, "AwsS3BucketDetails":{ "type":"structure", "members":{ @@ -4109,7 +4228,8 @@ "AccessControlList":{"shape":"NonEmptyString"}, "BucketLoggingConfiguration":{"shape":"AwsS3BucketLoggingConfiguration"}, "BucketWebsiteConfiguration":{"shape":"AwsS3BucketWebsiteConfiguration"}, - "BucketNotificationConfiguration":{"shape":"AwsS3BucketNotificationConfiguration"} + "BucketNotificationConfiguration":{"shape":"AwsS3BucketNotificationConfiguration"}, + "BucketVersioningConfiguration":{"shape":"AwsS3BucketBucketVersioningConfiguration"} } }, "AwsS3BucketLoggingConfiguration":{ @@ -5292,6 +5412,48 @@ "Original":{"shape":"NonEmptyString"} } }, + "FirewallPolicyDetails":{ + "type":"structure", + "members":{ + "StatefulRuleGroupReferences":{"shape":"FirewallPolicyStatefulRuleGroupReferencesList"}, + "StatelessCustomActions":{"shape":"FirewallPolicyStatelessCustomActionsList"}, + "StatelessDefaultActions":{"shape":"NonEmptyStringList"}, + "StatelessFragmentDefaultActions":{"shape":"NonEmptyStringList"}, + "StatelessRuleGroupReferences":{"shape":"FirewallPolicyStatelessRuleGroupReferencesList"} + } + }, + "FirewallPolicyStatefulRuleGroupReferencesDetails":{ + "type":"structure", + "members":{ + "ResourceArn":{"shape":"NonEmptyString"} + } + }, + 
"FirewallPolicyStatefulRuleGroupReferencesList":{ + "type":"list", + "member":{"shape":"FirewallPolicyStatefulRuleGroupReferencesDetails"} + }, + "FirewallPolicyStatelessCustomActionsDetails":{ + "type":"structure", + "members":{ + "ActionDefinition":{"shape":"StatelessCustomActionDefinition"}, + "ActionName":{"shape":"NonEmptyString"} + } + }, + "FirewallPolicyStatelessCustomActionsList":{ + "type":"list", + "member":{"shape":"FirewallPolicyStatelessCustomActionsDetails"} + }, + "FirewallPolicyStatelessRuleGroupReferencesDetails":{ + "type":"structure", + "members":{ + "Priority":{"shape":"Integer"}, + "ResourceArn":{"shape":"NonEmptyString"} + } + }, + "FirewallPolicyStatelessRuleGroupReferencesList":{ + "type":"list", + "member":{"shape":"FirewallPolicyStatelessRuleGroupReferencesDetails"} + }, "GeoLocation":{ "type":"structure", "members":{ @@ -6233,7 +6395,10 @@ "AwsWafRateBasedRule":{"shape":"AwsWafRateBasedRuleDetails"}, "AwsWafRegionalRateBasedRule":{"shape":"AwsWafRegionalRateBasedRuleDetails"}, "AwsEcrRepository":{"shape":"AwsEcrRepositoryDetails"}, - "AwsEksCluster":{"shape":"AwsEksClusterDetails"} + "AwsEksCluster":{"shape":"AwsEksClusterDetails"}, + "AwsNetworkFirewallFirewallPolicy":{"shape":"AwsNetworkFirewallFirewallPolicyDetails"}, + "AwsNetworkFirewallFirewall":{"shape":"AwsNetworkFirewallFirewallDetails"}, + "AwsNetworkFirewallRuleGroup":{"shape":"AwsNetworkFirewallRuleGroupDetails"} } }, "ResourceList":{ @@ -6260,6 +6425,191 @@ "type":"list", "member":{"shape":"Result"} }, + "RuleGroupDetails":{ + "type":"structure", + "members":{ + "RuleVariables":{"shape":"RuleGroupVariables"}, + "RulesSource":{"shape":"RuleGroupSource"} + } + }, + "RuleGroupSource":{ + "type":"structure", + "members":{ + "RulesSourceList":{"shape":"RuleGroupSourceListDetails"}, + "RulesString":{"shape":"NonEmptyString"}, + "StatefulRules":{"shape":"RuleGroupSourceStatefulRulesList"}, + "StatelessRulesAndCustomActions":{"shape":"RuleGroupSourceStatelessRulesAndCustomActionsDetails"} + } + }, + "RuleGroupSourceCustomActionsDetails":{ + "type":"structure", + "members":{ + "ActionDefinition":{"shape":"StatelessCustomActionDefinition"}, + "ActionName":{"shape":"NonEmptyString"} + } + }, + "RuleGroupSourceCustomActionsList":{ + "type":"list", + "member":{"shape":"RuleGroupSourceCustomActionsDetails"} + }, + "RuleGroupSourceListDetails":{ + "type":"structure", + "members":{ + "GeneratedRulesType":{"shape":"NonEmptyString"}, + "TargetTypes":{"shape":"NonEmptyStringList"}, + "Targets":{"shape":"NonEmptyStringList"} + } + }, + "RuleGroupSourceStatefulRulesDetails":{ + "type":"structure", + "members":{ + "Action":{"shape":"NonEmptyString"}, + "Header":{"shape":"RuleGroupSourceStatefulRulesHeaderDetails"}, + "RuleOptions":{"shape":"RuleGroupSourceStatefulRulesOptionsList"} + } + }, + "RuleGroupSourceStatefulRulesHeaderDetails":{ + "type":"structure", + "members":{ + "Destination":{"shape":"NonEmptyString"}, + "DestinationPort":{"shape":"NonEmptyString"}, + "Direction":{"shape":"NonEmptyString"}, + "Protocol":{"shape":"NonEmptyString"}, + "Source":{"shape":"NonEmptyString"}, + "SourcePort":{"shape":"NonEmptyString"} + } + }, + "RuleGroupSourceStatefulRulesList":{ + "type":"list", + "member":{"shape":"RuleGroupSourceStatefulRulesDetails"} + }, + "RuleGroupSourceStatefulRulesOptionsDetails":{ + "type":"structure", + "members":{ + "Keyword":{"shape":"NonEmptyString"}, + "Settings":{"shape":"RuleGroupSourceStatefulRulesRuleOptionsSettingsList"} + } + }, + "RuleGroupSourceStatefulRulesOptionsList":{ + "type":"list", + 
"member":{"shape":"RuleGroupSourceStatefulRulesOptionsDetails"} + }, + "RuleGroupSourceStatefulRulesRuleOptionsSettingsList":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, + "RuleGroupSourceStatelessRuleDefinition":{ + "type":"structure", + "members":{ + "Actions":{"shape":"NonEmptyStringList"}, + "MatchAttributes":{"shape":"RuleGroupSourceStatelessRuleMatchAttributes"} + } + }, + "RuleGroupSourceStatelessRuleMatchAttributes":{ + "type":"structure", + "members":{ + "DestinationPorts":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesDestinationPortsList"}, + "Destinations":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesDestinationsList"}, + "Protocols":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesProtocolsList"}, + "SourcePorts":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesSourcePortsList"}, + "Sources":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesSourcesList"}, + "TcpFlags":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesTcpFlagsList"} + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesDestinationPorts":{ + "type":"structure", + "members":{ + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"} + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesDestinationPortsList":{ + "type":"list", + "member":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesDestinationPorts"} + }, + "RuleGroupSourceStatelessRuleMatchAttributesDestinations":{ + "type":"structure", + "members":{ + "AddressDefinition":{"shape":"NonEmptyString"} + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesDestinationsList":{ + "type":"list", + "member":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesDestinations"} + }, + "RuleGroupSourceStatelessRuleMatchAttributesProtocolsList":{ + "type":"list", + "member":{"shape":"Integer"} + }, + "RuleGroupSourceStatelessRuleMatchAttributesSourcePorts":{ + "type":"structure", + "members":{ + "FromPort":{"shape":"Integer"}, + "ToPort":{"shape":"Integer"} + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesSourcePortsList":{ + "type":"list", + "member":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesSourcePorts"} + }, + "RuleGroupSourceStatelessRuleMatchAttributesSources":{ + "type":"structure", + "members":{ + "AddressDefinition":{"shape":"NonEmptyString"} + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesSourcesList":{ + "type":"list", + "member":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesSources"} + }, + "RuleGroupSourceStatelessRuleMatchAttributesTcpFlags":{ + "type":"structure", + "members":{ + "Flags":{"shape":"NonEmptyStringList"}, + "Masks":{"shape":"NonEmptyStringList"} + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesTcpFlagsList":{ + "type":"list", + "member":{"shape":"RuleGroupSourceStatelessRuleMatchAttributesTcpFlags"} + }, + "RuleGroupSourceStatelessRulesAndCustomActionsDetails":{ + "type":"structure", + "members":{ + "CustomActions":{"shape":"RuleGroupSourceCustomActionsList"}, + "StatelessRules":{"shape":"RuleGroupSourceStatelessRulesList"} + } + }, + "RuleGroupSourceStatelessRulesDetails":{ + "type":"structure", + "members":{ + "Priority":{"shape":"Integer"}, + "RuleDefinition":{"shape":"RuleGroupSourceStatelessRuleDefinition"} + } + }, + "RuleGroupSourceStatelessRulesList":{ + "type":"list", + "member":{"shape":"RuleGroupSourceStatelessRulesDetails"} + }, + "RuleGroupVariables":{ + "type":"structure", + "members":{ + "IpSets":{"shape":"RuleGroupVariablesIpSetsDetails"}, + "PortSets":{"shape":"RuleGroupVariablesPortSetsDetails"} + } + }, + 
"RuleGroupVariablesIpSetsDetails":{ + "type":"structure", + "members":{ + "Definition":{"shape":"NonEmptyStringList"} + } + }, + "RuleGroupVariablesPortSetsDetails":{ + "type":"structure", + "members":{ + "Definition":{"shape":"NonEmptyStringList"} + } + }, "SecurityGroups":{ "type":"list", "member":{"shape":"NonEmptyString"} @@ -6406,6 +6756,13 @@ "INCOMPLETE" ] }, + "StandardsStatusReason":{ + "type":"structure", + "required":["StatusReasonCode"], + "members":{ + "StatusReasonCode":{"shape":"StatusReasonCode"} + } + }, "StandardsSubscription":{ "type":"structure", "required":[ @@ -6418,7 +6775,8 @@ "StandardsSubscriptionArn":{"shape":"NonEmptyString"}, "StandardsArn":{"shape":"NonEmptyString"}, "StandardsInput":{"shape":"StandardsInputParameterMap"}, - "StandardsStatus":{"shape":"StandardsStatus"} + "StandardsStatus":{"shape":"StandardsStatus"}, + "StandardsStatusReason":{"shape":"StandardsStatusReason"} } }, "StandardsSubscriptionArns":{ @@ -6445,6 +6803,28 @@ "type":"list", "member":{"shape":"StandardsSubscription"} }, + "StatelessCustomActionDefinition":{ + "type":"structure", + "members":{ + "PublishMetricAction":{"shape":"StatelessCustomPublishMetricAction"} + } + }, + "StatelessCustomPublishMetricAction":{ + "type":"structure", + "members":{ + "Dimensions":{"shape":"StatelessCustomPublishMetricActionDimensionsList"} + } + }, + "StatelessCustomPublishMetricActionDimension":{ + "type":"structure", + "members":{ + "Value":{"shape":"NonEmptyString"} + } + }, + "StatelessCustomPublishMetricActionDimensionsList":{ + "type":"list", + "member":{"shape":"StatelessCustomPublishMetricActionDimension"} + }, "StatusReason":{ "type":"structure", "required":["ReasonCode"], @@ -6453,6 +6833,13 @@ "Description":{"shape":"NonEmptyString"} } }, + "StatusReasonCode":{ + "type":"string", + "enum":[ + "NO_AVAILABLE_CONFIGURATION_RECORDER", + "INTERNAL_ERROR" + ] + }, "StatusReasonsList":{ "type":"list", "member":{"shape":"StatusReason"} diff --git a/models/apis/securityhub/2018-10-26/docs-2.json b/models/apis/securityhub/2018-10-26/docs-2.json index ad56153eafe..8d08b7fafbd 100644 --- a/models/apis/securityhub/2018-10-26/docs-2.json +++ b/models/apis/securityhub/2018-10-26/docs-2.json @@ -6,7 +6,7 @@ "AcceptInvitation": "This method is deprecated. Instead, use AcceptAdministratorInvitation
.
The Security Hub console continues to use AcceptInvitation
. It will eventually change to use AcceptAdministratorInvitation
. Any IAM policies that specifically control access to this function must continue to use AcceptInvitation
. You should also add AcceptAdministratorInvitation
to your policies to ensure that the correct permissions are in place after the console begins to use AcceptAdministratorInvitation
.
Accepts the invitation to be a member account and be monitored by the Security Hub administrator account that the invitation was sent from.
This operation is only used by member accounts that are not added through Organizations.
When the member account accepts the invitation, permission is granted to the administrator account to view findings generated in the member account.
", "BatchDisableStandards": "Disables the standards specified by the provided StandardsSubscriptionArns
.
For more information, see Security Standards section of the Security Hub User Guide.
", "BatchEnableStandards": "Enables the standards specified by the provided StandardsArn
. To obtain the ARN for a standard, use the DescribeStandards
operation.
For more information, see the Security Standards section of the Security Hub User Guide.
", - "BatchImportFindings": "Imports security findings generated from an integrated product into Security Hub. This action is requested by the integrated product to import its findings into Security Hub.
The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.
After a finding is created, BatchImportFindings
cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.
Note
UserDefinedFields
VerificationState
Workflow
Finding providers also should not use BatchImportFindings
to update the following attributes.
Confidence
Criticality
RelatedFindings
Severity
Types
Instead, finding providers use FindingProviderFields
to provide values for these attributes.
Imports security findings generated by a finding provider into Security Hub. This action is requested by the finding provider to import its findings into Security Hub.
BatchImportFindings
must be called by one of the following:
The account that is associated with the findings. The identifier of the associated account is the value of the AwsAccountId
attribute for the finding.
An account that is allow-listed for an official Security Hub partner integration.
The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.
After a finding is created, BatchImportFindings
cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.
Note
UserDefinedFields
VerificationState
Workflow
Finding providers also should not use BatchImportFindings
to update the following attributes.
Confidence
Criticality
RelatedFindings
Severity
Types
Instead, finding providers use FindingProviderFields
to provide values for these attributes.
Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account.
Updates from BatchUpdateFindings
do not affect the value of UpdatedAt
for a finding.
Administrator and member accounts can use BatchUpdateFindings
to update the following finding fields and objects.
Confidence
Criticality
Note
RelatedFindings
Severity
Types
UserDefinedFields
VerificationState
Workflow
You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the Security Hub User Guide.
", "CreateActionTarget": "Creates a custom action target in Security Hub.
You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.
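As a companion to the BatchImportFindings/BatchUpdateFindings notes above, a minimal Go sketch of a customer-side workflow update, one of the fields BatchUpdateFindings is allowed to change; the finding ID and product ARN are hypothetical placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := securityhub.New(sess)

	// Suppress a finding from the customer side. Workflow is one of the
	// fields that BatchUpdateFindings (not BatchImportFindings) may change.
	_, err := svc.BatchUpdateFindings(&securityhub.BatchUpdateFindingsInput{
		FindingIdentifiers: []*securityhub.AwsSecurityFindingIdentifier{{
			Id:         aws.String("arn:aws:securityhub:us-east-1:123456789012:finding/example"),
			ProductArn: aws.String("arn:aws:securityhub:us-east-1::product/example/example"),
		}},
		Workflow: &securityhub.WorkflowUpdate{Status: aws.String("SUPPRESSED")},
	})
	if err != nil {
		fmt.Println("BatchUpdateFindings failed:", err)
	}
}
```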
", "CreateFindingAggregator": "Used to enable finding aggregation. Must be called from the aggregation Region.
For more details about cross-Region replication, see Configuring finding aggregation in the Security Hub User Guide.
", @@ -291,12 +291,60 @@ "ResourceDetails$AwsApiGatewayV2Stage": "Provides information about a version 2 stage for Amazon API Gateway.
" } }, + "AwsAutoScalingAutoScalingGroupAvailabilityZonesList": { + "base": null, + "refs": { + "AwsAutoScalingAutoScalingGroupDetails$AvailabilityZones": "The list of Availability Zones for the automatic scaling group.
" + } + }, + "AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails": { + "base": "An Availability Zone for the automatic scaling group.
", + "refs": { + "AwsAutoScalingAutoScalingGroupAvailabilityZonesList$member": null + } + }, "AwsAutoScalingAutoScalingGroupDetails": { "base": "Provides details about an auto scaling group.
", "refs": { "ResourceDetails$AwsAutoScalingAutoScalingGroup": "Details for an autoscaling group.
" } }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyDetails": { + "base": "The mixed instances policy for the automatic scaling group.
", + "refs": { + "AwsAutoScalingAutoScalingGroupDetails$MixedInstancesPolicy": "The mixed instances policy for the automatic scaling group.
" + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyInstancesDistributionDetails": { + "base": "Information about the instances distribution.
", + "refs": { + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyDetails$InstancesDistribution": "The instances distribution. The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.
" + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateDetails": { + "base": "Describes a launch template and overrides for a mixed instances policy.
", + "refs": { + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyDetails$LaunchTemplate": "The launch template to use and the instance types (overrides) to use to provision EC2 instances to fulfill On-Demand and Spot capacities.
" + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateLaunchTemplateSpecification": { + "base": "Details about the launch template to use.
", + "refs": { + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateDetails$LaunchTemplateSpecification": "The launch template to use.
" + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesList": { + "base": null, + "refs": { + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateDetails$Overrides": "Property values to use to override the values in the launch template.
" + } + }, + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesListDetails": { + "base": "Property values to use to override the values in the launch template.
", + "refs": { + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateOverridesList$member": null + } + }, "AwsAutoScalingLaunchConfigurationBlockDeviceMappingsDetails": { "base": "A block device for the instance.
", "refs": { @@ -327,6 +375,12 @@ "AwsAutoScalingLaunchConfigurationDetails$InstanceMonitoring": "Indicates the type of monitoring for instances in the group.
" } }, + "AwsAutoScalingLaunchConfigurationMetadataOptions": { + "base": "The metadata options for the instances.
", + "refs": { + "AwsAutoScalingLaunchConfigurationDetails$MetadataOptions": "The metadata options for the instances.
" + } + }, "AwsCertificateManagerCertificateDetails": { "base": "Provides details about an Certificate Manager certificate.
", "refs": { @@ -1924,6 +1978,36 @@ "AwsLambdaLayerVersionDetails$Version": "The version number.
" } }, + "AwsNetworkFirewallFirewallDetails": { + "base": "Details about an Network Firewall firewall.
", + "refs": { + "ResourceDetails$AwsNetworkFirewallFirewall": "Details about an Network Firewall firewall.
" + } + }, + "AwsNetworkFirewallFirewallPolicyDetails": { + "base": "Details about a firewall policy. A firewall policy defines the behavior of a network firewall.
", + "refs": { + "ResourceDetails$AwsNetworkFirewallFirewallPolicy": "Details about an Network Firewall firewall policy.
" + } + }, + "AwsNetworkFirewallFirewallSubnetMappingsDetails": { + "base": "A public subnet that Network Firewall uses for the firewall.
", + "refs": { + "AwsNetworkFirewallFirewallSubnetMappingsList$member": null + } + }, + "AwsNetworkFirewallFirewallSubnetMappingsList": { + "base": null, + "refs": { + "AwsNetworkFirewallFirewallDetails$SubnetMappings": "The public subnets that Network Firewall uses for the firewall. Each subnet must belong to a different Availability Zone.
" + } + }, + "AwsNetworkFirewallRuleGroupDetails": { + "base": "Details about an Network Firewall rule group. Rule groups are used to inspect and control network traffic. Stateless rule groups apply to individual packets. Stateful rule groups apply to packets in the context of their traffic flow.
Rule groups are referenced in firewall policies.
", + "refs": { + "ResourceDetails$AwsNetworkFirewallRuleGroup": "Details about an Network Firewall rule group.
" + } + }, "AwsOpenSearchServiceDomainClusterConfigDetails": { "base": "Details about the configuration of an OpenSearch cluster.
", "refs": { @@ -2406,6 +2490,12 @@ "AwsS3BucketBucketLifecycleConfigurationRulesDetails$Transitions": "Transition rules that indicate when objects transition to a specified storage class.
" } }, + "AwsS3BucketBucketVersioningConfiguration": { + "base": "Describes the versioning state of an S3 bucket.
", + "refs": { + "AwsS3BucketDetails$BucketVersioningConfiguration": "The versioning state of an S3 bucket.
" + } + }, "AwsS3BucketDetails": { "base": "The details of an Amazon S3 bucket.
", "refs": { @@ -2830,6 +2920,9 @@ "AwsIamPolicyDetails$IsAttachable": "Whether the policy can be attached to a user, group, or role.
", "AwsIamPolicyVersion$IsDefaultVersion": "Whether the version is the default version.
", "AwsKmsKeyDetails$KeyRotationStatus": "Whether the key has key rotation enabled.
", + "AwsNetworkFirewallFirewallDetails$DeleteProtection": "Whether the firewall is protected from deletion. If set to true
, then the firewall cannot be deleted.
Whether the firewall is protected from a change to the firewall policy. If set to true
, you cannot associate a different policy with the firewall.
Whether the firewall is protected from a change to the subnet associations. If set to true
, you cannot map different subnets to the firewall.
Whether UltraWarm is enabled.
", "AwsOpenSearchServiceDomainClusterConfigDetails$DedicatedMasterEnabled": "Whether to use a dedicated master node for the OpenSearch domain. A dedicated master node performs cluster management tasks, but does not hold data or respond to data upload requests.
", "AwsOpenSearchServiceDomainClusterConfigDetails$ZoneAwarenessEnabled": "Whether to enable zone awareness for the OpenSearch domain. When zone awareness is enabled, OpenSearch Service allocates the cluster's nodes and replica index shards across Availability Zones (AZs) in the same Region. This prevents data loss and minimizes downtime if a node or data center fails.
", @@ -2876,6 +2969,7 @@ "AwsS3AccountPublicAccessBlockDetails$IgnorePublicAcls": "Indicates whether Amazon S3 ignores public ACLs that are associated with an S3 bucket.
", "AwsS3AccountPublicAccessBlockDetails$RestrictPublicBuckets": "Indicates whether to restrict access to an access point or S3 bucket that has a public policy to only Amazon Web Services service principals and authorized users within the S3 bucket owner's account.
", "AwsS3BucketBucketLifecycleConfigurationRulesDetails$ExpiredObjectDeleteMarker": "Whether Amazon S3 removes a delete marker that has no noncurrent versions. If set to true
, the delete marker is expired. If set to false
, the policy takes no action.
If you provide ExpiredObjectDeleteMarker
, you cannot provide ExpirationInDays
or ExpirationDate
.
Specifies whether MFA delete is currently enabled in the S3 bucket versioning configuration. If the S3 bucket was never configured with MFA delete, then this attribute is not included.
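A short sketch of how a finding's resource details might carry the new S3 bucket versioning configuration, assuming the shapes added in this diff generate the usual exported Go types; all values are hypothetical:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// s3Details sketches a finding resource that reports the versioning state
// of an S3 bucket, including the new MFA-delete flag.
func s3Details() *securityhub.ResourceDetails {
	return &securityhub.ResourceDetails{
		AwsS3Bucket: &securityhub.AwsS3BucketDetails{
			BucketVersioningConfiguration: &securityhub.AwsS3BucketBucketVersioningConfiguration{
				Status:             aws.String("Enabled"),
				IsMfaDeleteEnabled: aws.Bool(false),
			},
		},
	}
}
```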
", "AwsSecretsManagerSecretDetails$RotationOccurredWithinFrequency": "Whether the rotation occurred within the specified rotation frequency.
", "AwsSecretsManagerSecretDetails$RotationEnabled": "Whether rotation is enabled.
", "AwsSecretsManagerSecretDetails$Deleted": "Whether the secret is deleted.
", @@ -3371,6 +3465,48 @@ "FindingProviderFields$Severity": "The severity of a finding.
" } }, + "FirewallPolicyDetails": { + "base": "Defines the behavior of the firewall.
", + "refs": { + "AwsNetworkFirewallFirewallPolicyDetails$FirewallPolicy": "The firewall policy configuration.
" + } + }, + "FirewallPolicyStatefulRuleGroupReferencesDetails": { + "base": "A stateful rule group that is used by the firewall policy.
", + "refs": { + "FirewallPolicyStatefulRuleGroupReferencesList$member": null + } + }, + "FirewallPolicyStatefulRuleGroupReferencesList": { + "base": null, + "refs": { + "FirewallPolicyDetails$StatefulRuleGroupReferences": "The stateful rule groups that are used in the firewall policy.
" + } + }, + "FirewallPolicyStatelessCustomActionsDetails": { + "base": "A custom action that can be used for stateless packet handling.
", + "refs": { + "FirewallPolicyStatelessCustomActionsList$member": null + } + }, + "FirewallPolicyStatelessCustomActionsList": { + "base": null, + "refs": { + "FirewallPolicyDetails$StatelessCustomActions": "The custom action definitions that are available to use in the firewall policy's StatelessDefaultActions
setting.
A stateless rule group that is used by the firewall policy.
", + "refs": { + "FirewallPolicyStatelessRuleGroupReferencesList$member": null + } + }, + "FirewallPolicyStatelessRuleGroupReferencesList": { + "base": null, + "refs": { + "FirewallPolicyDetails$StatelessRuleGroupReferences": "The stateless rule groups that are used in the firewall policy.
" + } + }, "GeoLocation": { "base": "Provides the latitude and longitude coordinates of a location.
", "refs": { @@ -3525,8 +3661,12 @@ "AwsApiGatewayRestApiDetails$MinimumCompressionSize": "The minimum size in bytes of a payload before compression is enabled.
If null
, then compression is disabled.
If 0, then all payloads are compressed.
", "AwsApiGatewayV2RouteSettings$ThrottlingBurstLimit": "The throttling burst limit.
", "AwsAutoScalingAutoScalingGroupDetails$HealthCheckGracePeriod": "The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before it checks the health status of an EC2 instance that has come into service.
", + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyInstancesDistributionDetails$OnDemandBaseCapacity": "The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances.
", + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyInstancesDistributionDetails$OnDemandPercentageAboveBaseCapacity": "The percentage of On-Demand Instances and Spot Instances for additional capacity beyond OnDemandBaseCapacity
.
The number of Spot Instance pools across which to allocate your Spot Instances.
", "AwsAutoScalingLaunchConfigurationBlockDeviceMappingsEbsDetails$Iops": "The number of input/output (I/O) operations per second (IOPS) to provision for the volume.
Only supported for gp3
or io1
volumes. Required for io1
volumes. Not used with standard
, gp2
, st1
, or sc1
volumes.
The volume size, in GiBs. The following are the supported volumes sizes for each volume type:
gp2 and gp3: 1-16,384
io1: 4-16,384
st1 and sc1: 125-16,384
standard: 1-1,024
You must specify either SnapshotId
or VolumeSize
. If you specify both SnapshotId
and VolumeSize
, the volume size must be equal or greater than the size of the snapshot.
The HTTP PUT
response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.
The number of status codes that can cause a failover.
", "AwsCloudFrontDistributionOriginGroupFailoverStatusCodesItemList$member": null, "AwsCodeBuildProjectSource$GitCloneDepth": "Information about the Git clone depth for the build project.
", @@ -3598,6 +3738,7 @@ "AwsLambdaFunctionDetails$MemorySize": "The memory that is allocated to the function.
", "AwsLambdaFunctionDetails$Timeout": "The amount of time that Lambda allows a function to run before stopping it.
", "AwsLambdaFunctionLayer$CodeSize": "The size of the layer archive in bytes.
", + "AwsNetworkFirewallRuleGroupDetails$Capacity": "The maximum number of operating resources that this rule group can use.
", "AwsOpenSearchServiceDomainClusterConfigDetails$InstanceCount": "The number of data nodes to use in the OpenSearch domain.
", "AwsOpenSearchServiceDomainClusterConfigDetails$WarmCount": "The number of UltraWarm instances.
", "AwsOpenSearchServiceDomainClusterConfigDetails$DedicatedMasterCount": "The number of instances to use for the master node. If this attribute is specified, then DedicatedMasterEnabled
must be true
.
The number of findings that failed to import.
", "BatchImportFindingsResponse$SuccessCount": "The number of findings that were successfully imported.
", "DateRange$Value": "A date range value for the date filter.
", + "FirewallPolicyStatelessRuleGroupReferencesDetails$Priority": "The order in which to run the stateless rule group.
", "GetInvitationsCountResponse$InvitationsCount": "The number of all membership invitations sent to this Security Hub member account, not including the currently accepted invitation.
", "IcmpTypeCode$Code": "The ICMP code for which to deny or allow access. To deny or allow all codes, use the value -1.
", "IcmpTypeCode$Type": "The ICMP type for which to deny or allow access. To deny or allow all types, use the value -1.
", @@ -3679,6 +3821,12 @@ "PortRangeFromTo$To": "The last port in the port range.
", "ProcessDetails$Pid": "The process ID.
", "ProcessDetails$ParentPid": "The parent process ID.
", + "RuleGroupSourceStatelessRuleMatchAttributesDestinationPorts$FromPort": "The starting port value for the port range.
", + "RuleGroupSourceStatelessRuleMatchAttributesDestinationPorts$ToPort": "The ending port value for the port range.
", + "RuleGroupSourceStatelessRuleMatchAttributesProtocolsList$member": null, + "RuleGroupSourceStatelessRuleMatchAttributesSourcePorts$FromPort": "The starting port value for the port range.
", + "RuleGroupSourceStatelessRuleMatchAttributesSourcePorts$ToPort": "The ending port value for the port range.
", + "RuleGroupSourceStatelessRulesDetails$Priority": "Indicates the order in which to run this rule relative to all of the rules in the stateless rule group.
", "Severity$Normalized": "Deprecated. The normalized severity of a finding. This attribute is being deprecated. Instead of providing Normalized
, provide Label
.
If you provide Label
and do not provide Normalized
, then Normalized
is set automatically as follows.
INFORMATIONAL
- 0
LOW
- 1
MEDIUM
- 40
HIGH
- 70
CRITICAL
- 90
Indicates when the stage was most recently updated.
Uses the date-time
format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z
.
The name of the stage.
", "AwsApiGatewayV2StageDetails$LastDeploymentStatusMessage": "The status of the last deployment of a stage. Supported only if the stage has automatic deployment enabled.
", + "AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails$Value": "The name of the Availability Zone.
", "AwsAutoScalingAutoScalingGroupDetails$LaunchConfigurationName": "The name of the launch configuration.
", "AwsAutoScalingAutoScalingGroupDetails$HealthCheckType": "The service to use for the health checks.
", "AwsAutoScalingAutoScalingGroupDetails$CreatedTime": "Indicates when the auto scaling group was created.
Uses the date-time
format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z
.
How to allocate instance types to fulfill On-Demand capacity.
", + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyInstancesDistributionDetails$SpotAllocationStrategy": "How to allocate instances across Spot Instance pools.
", + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyInstancesDistributionDetails$SpotMaxPrice": "The maximum price per unit hour that you are willing to pay for a Spot Instance.
", + "AwsAutoScalingAutoScalingGroupMixedInstancesPolicyLaunchTemplateLaunchTemplateSpecification$LaunchTemplateId": "The identifier of the launch template. You must specify either LaunchTemplateId
or LaunchTemplateName
.
The name of the launch template. You must specify either LaunchTemplateId
or LaunchTemplateName
.
Identifies the version of the launch template. You can specify a version identifier, or use the values $Latest
or $Default
.
The instance type. For example, m3.xlarge
.
The number of capacity units provided by the specified instance type in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic.
", "AwsAutoScalingLaunchConfigurationBlockDeviceMappingsDetails$DeviceName": "The device name that is exposed to the EC2 instance. For example, /dev/sdh
or xvdh
.
The name of the virtual device (for example, ephemeral0
).
You can provide either VirtualName
or Ebs
, but not both.
The snapshot ID of the volume to use.
You must specify either VolumeSize
or SnapshotId
.
The identifier of the RAM disk associated with the AMI.
", "AwsAutoScalingLaunchConfigurationDetails$SpotPrice": "The maximum hourly price to be paid for any Spot Instance that is launched to fulfill the request.
", "AwsAutoScalingLaunchConfigurationDetails$UserData": "The user data to make available to the launched EC2 instances. Must be base64-encoded text.
", + "AwsAutoScalingLaunchConfigurationMetadataOptions$HttpEndpoint": "Enables or disables the HTTP metadata endpoint on your instances. By default, the metadata endpoint is enabled.
", + "AwsAutoScalingLaunchConfigurationMetadataOptions$HttpTokens": "Indicates whether token usage is required
or optional
for metadata requests. By default, token usage is optional
.
The ARN of the private certificate authority (CA) that will be used to issue the certificate.
", "AwsCertificateManagerCertificateDetails$CreatedAt": "Indicates when the certificate was requested.
Uses the date-time
format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z
.
The fully qualified domain name (FQDN), such as www.example.com, that is secured by the certificate.
", @@ -4580,6 +4739,22 @@ "AwsLambdaFunctionTracingConfig$Mode": "The tracing mode.
", "AwsLambdaFunctionVpcConfig$VpcId": "The ID of the VPC.
", "AwsLambdaLayerVersionDetails$CreatedDate": "Indicates when the version was created.
Uses the date-time
format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z
.
A description of the firewall.
", + "AwsNetworkFirewallFirewallDetails$FirewallArn": "The ARN of the firewall.
", + "AwsNetworkFirewallFirewallDetails$FirewallId": "The identifier of the firewall.
", + "AwsNetworkFirewallFirewallDetails$FirewallName": "A descriptive name of the firewall.
", + "AwsNetworkFirewallFirewallDetails$FirewallPolicyArn": "The ARN of the firewall policy.
", + "AwsNetworkFirewallFirewallDetails$VpcId": "The identifier of the VPC where the firewall is used.
", + "AwsNetworkFirewallFirewallPolicyDetails$FirewallPolicyArn": "The ARN of the firewall policy.
", + "AwsNetworkFirewallFirewallPolicyDetails$FirewallPolicyId": "The identifier of the firewall policy.
", + "AwsNetworkFirewallFirewallPolicyDetails$FirewallPolicyName": "The name of the firewall policy.
", + "AwsNetworkFirewallFirewallPolicyDetails$Description": "A description of the firewall policy.
", + "AwsNetworkFirewallFirewallSubnetMappingsDetails$SubnetId": "The identifier of the subnet
", + "AwsNetworkFirewallRuleGroupDetails$Description": "A description of the rule group.
", + "AwsNetworkFirewallRuleGroupDetails$RuleGroupArn": "The ARN of the rule group.
", + "AwsNetworkFirewallRuleGroupDetails$RuleGroupId": "The identifier of the rule group.
", + "AwsNetworkFirewallRuleGroupDetails$RuleGroupName": "The descriptive name of the rule group.
", + "AwsNetworkFirewallRuleGroupDetails$Type": "The type of rule group. A rule group can be stateful or stateless.
", "AwsOpenSearchServiceDomainClusterConfigDetails$InstanceType": "The instance type for your data nodes.
", "AwsOpenSearchServiceDomainClusterConfigDetails$WarmType": "The type of UltraWarm instance.
", "AwsOpenSearchServiceDomainClusterConfigDetails$DedicatedMasterType": "The hardware configuration of the computer that hosts the dedicated master node.
If this attribute is specified, then DedicatedMasterEnabled
must be true
.
The class of storage to change the object to after the object is noncurrent for the specified number of days.
", "AwsS3BucketBucketLifecycleConfigurationRulesTransitionsDetails$Date": "A date on which to transition objects to the specified storage class. If you provide Date
, you cannot provide Days
.
Uses the date-time
format specified in RFC 3339 section 5.6, Internet Date/Time Format. The value cannot contain spaces. For example, 2020-03-22T13:22:13.933Z
.
The storage class to transition the object to.
", + "AwsS3BucketBucketVersioningConfiguration$Status": "The versioning status of the S3 bucket.
", "AwsS3BucketDetails$OwnerId": "The canonical user ID of the owner of the S3 bucket.
", "AwsS3BucketDetails$OwnerName": "The display name of the owner of the S3 bucket.
", "AwsS3BucketDetails$OwnerAccountId": "The Amazon Web Services account identifier of the account that owns the S3 bucket.
", @@ -4938,6 +5114,9 @@ "FieldMap$value": null, "FindingAggregator$FindingAggregatorArn": "The ARN of the finding aggregator. You use the finding aggregator ARN to retrieve details for, update, and delete the finding aggregator.
", "FindingProviderSeverity$Original": "The finding provider's original value for the severity.
", + "FirewallPolicyStatefulRuleGroupReferencesDetails$ResourceArn": "The ARN of the stateful rule group.
", + "FirewallPolicyStatelessCustomActionsDetails$ActionName": "The name of the custom action.
", + "FirewallPolicyStatelessRuleGroupReferencesDetails$ResourceArn": "The ARN of the stateless rule group.
", "GetFindingAggregatorRequest$FindingAggregatorArn": "The ARN of the finding aggregator to return details for. To obtain the ARN, use ListFindingAggregators
.
The ARN of the finding aggregator.
", "GetFindingAggregatorResponse$FindingAggregationRegion": "The aggregation Region.
", @@ -5033,6 +5212,20 @@ "ResourceNotFoundException$Message": null, "ResourceNotFoundException$Code": null, "Result$ProcessingResult": "The reason that the account was not processed.
", + "RuleGroupSource$RulesString": "Stateful inspection criteria, provided in Suricata compatible intrusion prevention system (IPS) rules.
", + "RuleGroupSourceCustomActionsDetails$ActionName": "A descriptive name of the custom action.
", + "RuleGroupSourceListDetails$GeneratedRulesType": "Indicates whether to allow or deny access to the domains listed in Targets
.
Defines what Network Firewall should do with the packets in a traffic flow when the flow matches the stateful rule criteria.
", + "RuleGroupSourceStatefulRulesHeaderDetails$Destination": "The destination IP address or address range to inspect for, in CIDR notation. To match with any address, specify ANY
.
The destination port to inspect for. You can specify an individual port, such as 1994
. You also can specify a port range, such as 1990:1994
. To match with any port, specify ANY
.
The direction of traffic flow to inspect. If set to ANY
, the inspection matches bidirectional traffic, both from the source to the destination and from the destination to the source. If set to FORWARD
, the inspection only matches traffic going from the source to the destination.
The protocol to inspect for. To inspector for all protocols, use IP
.
The source IP address or address range to inspect for, in CIDR notation. To match with any address, specify ANY
.
The source port to inspect for. You can specify an individual port, such as 1994
. You also can specify a port range, such as 1990:1994
. To match with any port, specify ANY
.
A keyword to look for.
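A minimal sketch of how the stateful header fields above fit together, assuming the securityhub Go types generated from this model (field names as shown in this diff):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Inspect forward traffic from any source and port to destination ports
	// 1990:1994, across all protocols (IP).
	header := &securityhub.RuleGroupSourceStatefulRulesHeaderDetails{
		Destination:     aws.String("ANY"),
		DestinationPort: aws.String("1990:1994"),
		Direction:       aws.String("FORWARD"),
		Protocol:        aws.String("IP"),
		Source:          aws.String("ANY"),
		SourcePort:      aws.String("ANY"),
	}
	fmt.Println(header)
}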
", + "RuleGroupSourceStatefulRulesRuleOptionsSettingsList$member": null, + "RuleGroupSourceStatelessRuleMatchAttributesDestinations$AddressDefinition": "An IP address or a block of IP addresses.
", + "RuleGroupSourceStatelessRuleMatchAttributesSources$AddressDefinition": "An IP address or a block of IP addresses.
", "SecurityGroups$member": null, "SensitiveDataDetections$Type": "The type of sensitive data that was detected. For example, the type might indicate that the data is an email address.
", "SensitiveDataResult$Category": "The category of sensitive data that was detected. For example, the category can indicate that the sensitive data involved credentials, financial information, or personal information.
", @@ -5060,6 +5253,7 @@ "StandardsSubscription$StandardsArn": "The ARN of a standard.
", "StandardsSubscriptionArns$member": null, "StandardsSubscriptionRequest$StandardsArn": "The ARN of the standard that you want to enable. To view the list of available standards and their ARNs, use the DescribeStandards
operation.
The value to use for the custom metric dimension.
", "StatusReason$ReasonCode": "A code that represents a reason for the control status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the Security Hub User Guide.
", "StatusReason$Description": "The corresponding description for the status reason code.
", "StringFilter$Value": "The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is Security Hub
. If you provide security hub
as the filter text, then there is no match.
The list of security group IDs that are associated with the VPC endpoints for the domain.
", "AwsOpenSearchServiceDomainVpcOptionsDetails$SubnetIds": "A list of subnet IDs that are associated with the VPC endpoints for the domain.
", "AwsRdsEventSubscriptionDetails$EventCategoriesList": "The list of event categories for the event notification subscription.
", - "AwsRdsEventSubscriptionDetails$SourceIdsList": "A list of source identifiers for the event notification subscription.
" + "AwsRdsEventSubscriptionDetails$SourceIdsList": "A list of source identifiers for the event notification subscription.
", + "FirewallPolicyDetails$StatelessDefaultActions": "The actions to take on a packet if it doesn't match any of the stateless rules in the policy.
You must specify a standard action (aws:pass
, aws:drop
, aws:forward_to_sfe
), and can optionally include a custom action from StatelessCustomActions
.
The actions to take on a fragmented UDP packet if it doesn't match any of the stateless rules in the policy.
You must specify a standard action (aws:pass
, aws:drop
, aws:forward_to_sfe
), and can optionally include a custom action from StatelessCustomActions
.
The protocols that you want to inspect. Specify LS_SNI
for HTTPS. Specify HTTP_HOST
for HTTP. You can specify either or both.
The domains that you want to inspect for in your traffic flows. You can provide full domain names, or use the '.' prefix as a wildcard. For example, .example.com
matches all domains that end with example.com
.
The actions to take on a packet that matches one of the stateless rule definition's match attributes. You must specify a standard action (aws:pass
, aws:drop
, or aws:forward_to_sfe
). You can then add custom actions.
Defines the flags from the Masks
setting that must be set in order for the packet to match. Flags that are listed must be set. Flags that are not listed must not be set.
The set of flags to consider in the inspection. If not specified, then all flags are inspected.
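A minimal sketch of the default-action fields above, assuming the FirewallPolicyDetails type generated from this model (the action strings are the standard actions named in the description):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Forward full packets to the stateful engine; drop fragmented packets.
	policy := &securityhub.FirewallPolicyDetails{
		StatelessDefaultActions:         aws.StringSlice([]string{"aws:forward_to_sfe"}),
		StatelessFragmentDefaultActions: aws.StringSlice([]string{"aws:drop"}),
	}
	fmt.Println(policy)
}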
", + "RuleGroupVariablesIpSetsDetails$Definition": "The list of IP addresses and ranges.
", + "RuleGroupVariablesPortSetsDetails$Definition": "The list of port ranges.
" } }, "Note": { @@ -5402,6 +5605,186 @@ "InviteMembersResponse$UnprocessedAccounts": "The list of Amazon Web Services accounts that could not be processed. For each account, the list includes the account ID and the email address.
" } }, + "RuleGroupDetails": { + "base": "Details about the rule group.
", + "refs": { + "AwsNetworkFirewallRuleGroupDetails$RuleGroup": "Details about the rule group.
" + } + }, + "RuleGroupSource": { + "base": "The rules and actions for the rule group.
", + "refs": { + "RuleGroupDetails$RulesSource": "The rules and actions for the rule group.
For stateful rule groups, can contain RulesString, RulesSourceList, or StatefulRules. For stateless rule groups, contains StatelessRulesAndCustomActions.
" + } + }, + "RuleGroupSourceCustomActionsDetails": { + "base": "A custom action definition. A custom action is an optional, non-standard action to use for stateless packet handling.
", + "refs": { + "RuleGroupSourceCustomActionsList$member": null + } + }, + "RuleGroupSourceCustomActionsList": { + "base": null, + "refs": { + "RuleGroupSourceStatelessRulesAndCustomActionsDetails$CustomActions": "Custom actions for the rule group.
" + } + }, + "RuleGroupSourceListDetails": { + "base": "Stateful inspection criteria for a domain list rule group.
", + "refs": { + "RuleGroupSource$RulesSourceList": "Stateful inspection criteria for a domain list rule group. A domain list rule group determines access by specific protocols to specific domains.
" + } + }, + "RuleGroupSourceStatefulRulesDetails": { + "base": "A Suricata rule specification.
", + "refs": { + "RuleGroupSourceStatefulRulesList$member": null + } + }, + "RuleGroupSourceStatefulRulesHeaderDetails": { + "base": "The inspection criteria for a stateful rule.
", + "refs": { + "RuleGroupSourceStatefulRulesDetails$Header": "The stateful inspection criteria for the rule.
" + } + }, + "RuleGroupSourceStatefulRulesList": { + "base": null, + "refs": { + "RuleGroupSource$StatefulRules": "Suricata rule specifications.
" + } + }, + "RuleGroupSourceStatefulRulesOptionsDetails": { + "base": "A rule option for a stateful rule.
", + "refs": { + "RuleGroupSourceStatefulRulesOptionsList$member": null + } + }, + "RuleGroupSourceStatefulRulesOptionsList": { + "base": null, + "refs": { + "RuleGroupSourceStatefulRulesDetails$RuleOptions": "Additional options for the rule.
" + } + }, + "RuleGroupSourceStatefulRulesRuleOptionsSettingsList": { + "base": null, + "refs": { + "RuleGroupSourceStatefulRulesOptionsDetails$Settings": "A list of settings.
" + } + }, + "RuleGroupSourceStatelessRuleDefinition": { + "base": "The definition of the stateless rule.
", + "refs": { + "RuleGroupSourceStatelessRulesDetails$RuleDefinition": "Provides the definition of the stateless rule.
" + } + }, + "RuleGroupSourceStatelessRuleMatchAttributes": { + "base": "Criteria for the stateless rule.
", + "refs": { + "RuleGroupSourceStatelessRuleDefinition$MatchAttributes": "The criteria for Network Firewall to use to inspect an individual packet in a stateless rule inspection.
" + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesDestinationPorts": { + "base": "A port range to specify the destination ports to inspect for.
", + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributesDestinationPortsList$member": null + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesDestinationPortsList": { + "base": null, + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributes$DestinationPorts": "A list of port ranges to specify the destination ports to inspect for.
" + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesDestinations": { + "base": "A destination IP address or range.
", + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributesDestinationsList$member": null + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesDestinationsList": { + "base": null, + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributes$Destinations": "The destination IP addresses and address ranges to inspect for, in CIDR notation.
" + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesProtocolsList": { + "base": null, + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributes$Protocols": "The protocols to inspect for.
" + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesSourcePorts": { + "base": "A port range to specify the source ports to inspect for.
", + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributesSourcePortsList$member": null + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesSourcePortsList": { + "base": null, + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributes$SourcePorts": "A list of port ranges to specify the source ports to inspect for.
" + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesSources": { + "base": "A source IP addresses and address range to inspect for.
", + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributesSourcesList$member": null + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesSourcesList": { + "base": null, + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributes$Sources": "The source IP addresses and address ranges to inspect for, in CIDR notation.
" + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesTcpFlags": { + "base": "A set of TCP flags and masks to inspect for.
", + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributesTcpFlagsList$member": null + } + }, + "RuleGroupSourceStatelessRuleMatchAttributesTcpFlagsList": { + "base": null, + "refs": { + "RuleGroupSourceStatelessRuleMatchAttributes$TcpFlags": "The TCP flags and masks to inspect for.
" + } + }, + "RuleGroupSourceStatelessRulesAndCustomActionsDetails": { + "base": "Stateless rules and custom actions for a stateless rule group.
", + "refs": { + "RuleGroupSource$StatelessRulesAndCustomActions": "The stateless rules and custom actions used by a stateless rule group.
" + } + }, + "RuleGroupSourceStatelessRulesDetails": { + "base": "A stateless rule in the rule group.
", + "refs": { + "RuleGroupSourceStatelessRulesList$member": null + } + }, + "RuleGroupSourceStatelessRulesList": { + "base": null, + "refs": { + "RuleGroupSourceStatelessRulesAndCustomActionsDetails$StatelessRules": "Stateless rules for the rule group.
" + } + }, + "RuleGroupVariables": { + "base": "Additional settings to use in the specified rules.
", + "refs": { + "RuleGroupDetails$RuleVariables": "Additional settings to use in the specified rules.
" + } + }, + "RuleGroupVariablesIpSetsDetails": { + "base": "A list of IP addresses and address ranges, in CIDR notation.
", + "refs": { + "RuleGroupVariables$IpSets": "A list of IP addresses and address ranges, in CIDR notation.
" + } + }, + "RuleGroupVariablesPortSetsDetails": { + "base": "A list of port ranges.
", + "refs": { + "RuleGroupVariables$PortSets": "A list of port ranges.
" + } + }, "SecurityGroups": { "base": null, "refs": { @@ -5532,6 +5915,12 @@ "StandardsSubscription$StandardsStatus": "The status of the standard subscription.
The status values are as follows: PENDING - Standard is in the process of being enabled. READY - Standard is enabled. INCOMPLETE - Standard could not be enabled completely. Some controls may not be available. DELETING - Standard is in the process of being disabled. FAILED - Standard could not be disabled.
" } }, + "StandardsStatusReason": { + "base": "The reason for the current status of a standard subscription.
", + "refs": { + "StandardsSubscription$StandardsStatusReason": "The reason for the current status.
" + } + }, "StandardsSubscription": { "base": "A resource that represents your subscription to a supported standard.
", "refs": { @@ -5565,12 +5954,43 @@ "GetEnabledStandardsResponse$StandardsSubscriptions": "The list of StandardsSubscriptions
objects that include information about the enabled standards.
" } }, + "StatelessCustomActionDefinition": { + "base": "The definition of a custom action that can be used for stateless packet handling.
", + "refs": { + "FirewallPolicyStatelessCustomActionsDetails$ActionDefinition": "The definition of the custom action.
", + "RuleGroupSourceCustomActionsDetails$ActionDefinition": "The definition of a custom action.
" + } + }, + "StatelessCustomPublishMetricAction": { + "base": "Information about metrics to publish to CloudWatch.
", + "refs": { + "StatelessCustomActionDefinition$PublishMetricAction": "Information about metrics to publish to CloudWatch.
" + } + }, + "StatelessCustomPublishMetricActionDimension": { + "base": "Defines a CloudWatch dimension value to publish.
", + "refs": { + "StatelessCustomPublishMetricActionDimensionsList$member": null + } + }, + "StatelessCustomPublishMetricActionDimensionsList": { + "base": null, + "refs": { + "StatelessCustomPublishMetricAction$Dimensions": "Defines CloudWatch dimension values to publish.
" + } + }, "StatusReason": { "base": "Provides additional context for the value of Compliance.Status
.
", "refs": { "StatusReasonsList$member": null } }, + "StatusReasonCode": { + "base": null, + "refs": { + "StandardsStatusReason$StatusReasonCode": "The reason code that represents the reason for the current status of a standard subscription.
" + } + }, "StatusReasonsList": { "base": null, "refs": { @@ -5643,7 +6063,7 @@ "AwsSecurityFindingFilters$ComplianceStatus": "Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS Amazon Web Services Foundations. Contains security standard-related finding details.
", "AwsSecurityFindingFilters$VerificationState": "The veracity of a finding.
", "AwsSecurityFindingFilters$WorkflowState": "The workflow state of a finding.
Note that this field is deprecated. To search for a finding based on its workflow status, use WorkflowStatus.
", - "AwsSecurityFindingFilters$WorkflowStatus": "The status of the investigation into a finding. Allowed values are the following. NEW - The initial state of a finding, before it is reviewed. Security Hub also resets the workflow status from NOTIFIED or RESOLVED to NEW in the following cases: The record state changes from ARCHIVED to ACTIVE. The compliance status changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. NOTIFIED - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. SUPPRESSED - The finding will not be reviewed again and will not be acted upon. RESOLVED - The finding was reviewed and remediated and is now considered resolved.
" + "AwsSecurityFindingFilters$WorkflowStatus": "The status of the investigation into a finding. Allowed values are the following. NEW - The initial state of a finding, before it is reviewed. Security Hub also resets the workflow status from NOTIFIED or RESOLVED to NEW in the following cases: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. NOTIFIED - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. If one of the following occurs, the workflow status is changed automatically from NOTIFIED to NEW: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to FAILED, WARNING, or NOT_AVAILABLE. SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is needed. The workflow status of a SUPPRESSED finding does not change if RecordState changes from ARCHIVED to ACTIVE. RESOLVED - The finding was reviewed and remediated and is now considered resolved. The finding remains RESOLVED unless one of the following occurs: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to FAILED, WARNING, or NOT_AVAILABLE. In those cases, the workflow status is automatically reset to NEW. For findings from controls, if Compliance.Status is PASSED, then Security Hub automatically sets the workflow status to RESOLVED.
", "AwsSecurityFindingFilters$RecordState": "The updated record state for the finding.",
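A hedged sketch of driving the workflow transitions above from this SDK; BatchUpdateFindings and WorkflowUpdate are existing securityhub types, while the identifier values below are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	svc := securityhub.New(session.Must(session.NewSession()))

	// Mark a finding RESOLVED; per the description above, Security Hub resets it
	// to NEW if its RecordState or Compliance.Status later changes.
	_, err := svc.BatchUpdateFindings(&securityhub.BatchUpdateFindingsInput{
		FindingIdentifiers: []*securityhub.AwsSecurityFindingIdentifier{{
			Id:         aws.String("example-finding-id"),                             // placeholder
			ProductArn: aws.String("arn:aws:securityhub:us-east-1::product/example"), // placeholder
		}},
		Workflow: &securityhub.WorkflowUpdate{Status: aws.String("RESOLVED")},
	})
	if err != nil {
		fmt.Println("BatchUpdateFindings failed:", err)
	}
}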
", "AwsSecurityFindingFilters$RelatedFindingsProductArn": "The ARN of the solution that generated a related finding.
", "AwsSecurityFindingFilters$RelatedFindingsId": "The solution-generated identifier for a related finding.
", diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index cc5cd965367..fac09a3e996 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -1569,6 +1569,7 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "cloudcontrolapi-fips.ca-central-1.amazonaws.com", @@ -5675,6 +5676,7 @@ "hostname" : "groundstation-fips.us-west-2.amazonaws.com" }, "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { "variants" : [ { "hostname" : "groundstation-fips.us-east-1.amazonaws.com", @@ -7141,6 +7143,19 @@ "us-east-1" : { } } }, + "lookoutmetrics" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "lookoutvision" : { "endpoints" : { "ap-northeast-1" : { }, @@ -7481,6 +7496,38 @@ "us-west-2" : { } } }, + "meetings-chime" : { + "endpoints" : { + "ap-southeast-1" : { }, + "eu-central-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "meetings-chime-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "meetings-chime-fips.us-east-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "meetings-chime-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "meetings-chime-fips.us-west-2.amazonaws.com" + } + } + }, "messaging-chime" : { "endpoints" : { "us-east-1" : { @@ -9502,6 +9549,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "s3.dualstack.ap-southeast-3.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "aws-global" : { "credentialScope" : { "region" : "us-east-1" diff --git a/service/apigateway/api.go b/service/apigateway/api.go index 19493d38c07..f3938090e94 100644 --- a/service/apigateway/api.go +++ b/service/apigateway/api.go @@ -23712,7 +23712,7 @@ func (s *PutRestApiInput) SetRestApiId(v string) *PutRestApiInput { type QuotaSettings struct { _ struct{} `type:"structure"` - // The maximum number of requests that can be made in a given time period. + // The target maximum number of requests that can be made in a given time period. Limit *int64 `locationName:"limit" type:"integer"` // The day that a time period starts. For example, with a time period of WEEK, @@ -24995,12 +24995,11 @@ func (s *TestInvokeMethodOutput) SetStatus(v int64) *TestInvokeMethodOutput { type ThrottleSettings struct { _ struct{} `type:"structure"` - // The API request burst limit, the maximum rate limit over a time ranging from - // one to a few seconds, depending upon whether the underlying token bucket - // is at its full capacity. + // The API target request burst rate limit. This allows more requests through + // for a period of time than the target rate limit. BurstLimit *int64 `locationName:"burstLimit" type:"integer"` - // The API request steady-state rate limit. + // The API target request rate limit. 
RateLimit *float64 `locationName:"rateLimit" type:"double"` } @@ -27299,8 +27298,12 @@ func (s *Usage) SetUsagePlanId(v string) *Usage { return s } -// Represents a usage plan than can specify who can assess associated API stages -// with specified request limits and quotas. +// Represents a usage plan used to specify who can assess associated API stages. +// Optionally, target request rate and quota limits can be set. In some cases +// clients can exceed the targets that you set. Don’t rely on usage plans +// to control costs. Consider using AWS Budgets (https://docs.aws.amazon.com/cost-management/latest/userguide/budgets-managing-costs.html) +// to monitor costs and AWS WAF (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html) +// to manage API requests. // // In a usage plan, you associate an API by specifying the API's Id and a stage // name of the specified API. You add plan customers by adding API keys to the @@ -27326,13 +27329,14 @@ type UsagePlan struct { // a SaaS product on AWS Marketplace. ProductCode *string `locationName:"productCode" type:"string"` - // The maximum number of permitted requests per a given unit time interval. + // The target maximum number of permitted requests per a given unit time interval. Quota *QuotaSettings `locationName:"quota" type:"structure"` // The collection of tags. Each tag element is associated with a given resource. Tags map[string]*string `locationName:"tags" type:"map"` - // The request throttle limits of a usage plan. + // Map containing method level throttling information for API stage in a usage + // plan. Throttle *ThrottleSettings `locationName:"throttle" type:"structure"` } diff --git a/service/customerprofiles/api.go b/service/customerprofiles/api.go index 7d5cb588292..7d2650bef55 100644 --- a/service/customerprofiles/api.go +++ b/service/customerprofiles/api.go @@ -1282,8 +1282,6 @@ func (c *CustomerProfiles) GetMatchesRequest(input *GetMatchesInput) (req *reque // GetMatches API operation for Amazon Connect Customer Profiles. // -// This API is in preview release for Amazon Connect and subject to change. -// // Before calling this API, use CreateDomain (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_CreateDomain.html) // or UpdateDomain (https://docs.aws.amazon.com/customerprofiles/latest/APIReference/API_UpdateDomain.html) // to enable identity resolution: set Matching to true. @@ -2328,8 +2326,6 @@ func (c *CustomerProfiles) MergeProfilesRequest(input *MergeProfilesInput) (req // MergeProfiles API operation for Amazon Connect Customer Profiles. // -// This API is in preview release for Amazon Connect and subject to change. -// // Runs an AWS Lambda job that does the following: // // All the profileKeys in the ProfileToBeMerged will be moved to the main profile. @@ -6160,9 +6156,14 @@ type GetIntegrationOutput struct { LastUpdatedAt *time.Time `type:"timestamp" required:"true"` // The name of the profile object type. - // - // ObjectTypeName is a required field - ObjectTypeName *string `min:"1" type:"string" required:"true"` + ObjectTypeName *string `min:"1" type:"string"` + + // A map in which each key is an event type from an external application such + // as Segment or Shopify, and each value is an ObjectTypeName (template) used + // to ingest the event. It supports the following event types: SegmentIdentify, + // ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, + // ShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders. 
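	//
	// A hedged illustration (the template name below is hypothetical, not part
	// of this API): a PutIntegration call could route Shopify order events to a
	// single order template:
	//
	//    ObjectTypeNames: map[string]*string{
	//        "ShopifyCreateOrders":  aws.String("MyOrderTemplate"),
	//        "ShopifyUpdatedOrders": aws.String("MyOrderTemplate"),
	//    }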
+ ObjectTypeNames map[string]*string `type:"map"` // The tags used to organize, track, or control access for this resource. Tags map[string]*string `min:"1" type:"map"` @@ -6215,6 +6216,12 @@ func (s *GetIntegrationOutput) SetObjectTypeName(v string) *GetIntegrationOutput return s } +// SetObjectTypeNames sets the ObjectTypeNames field's value. +func (s *GetIntegrationOutput) SetObjectTypeNames(v map[string]*string) *GetIntegrationOutput { + s.ObjectTypeNames = v + return s +} + // SetTags sets the Tags field's value. func (s *GetIntegrationOutput) SetTags(v map[string]*string) *GetIntegrationOutput { s.Tags = v @@ -7440,9 +7447,14 @@ type ListIntegrationItem struct { LastUpdatedAt *time.Time `type:"timestamp" required:"true"` // The name of the profile object type. - // - // ObjectTypeName is a required field - ObjectTypeName *string `min:"1" type:"string" required:"true"` + ObjectTypeName *string `min:"1" type:"string"` + + // A map in which each key is an event type from an external application such + // as Segment or Shopify, and each value is an ObjectTypeName (template) used + // to ingest the event. It supports the following event types: SegmentIdentify, + // ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, + // ShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders. + ObjectTypeNames map[string]*string `type:"map"` // The tags used to organize, track, or control access for this resource. Tags map[string]*string `min:"1" type:"map"` @@ -7495,6 +7507,12 @@ func (s *ListIntegrationItem) SetObjectTypeName(v string) *ListIntegrationItem { return s } +// SetObjectTypeNames sets the ObjectTypeNames field's value. +func (s *ListIntegrationItem) SetObjectTypeNames(v map[string]*string) *ListIntegrationItem { + s.ObjectTypeNames = v + return s +} + // SetTags sets the Tags field's value. func (s *ListIntegrationItem) SetTags(v map[string]*string) *ListIntegrationItem { s.Tags = v @@ -7966,8 +7984,8 @@ type ListProfileObjectsInput struct { NextToken *string `location:"querystring" locationName:"next-token" min:"1" type:"string"` // Applies a filter to the response to include profile objects with the specified - // index values. This filter is only supported for ObjectTypeName _asset and - // _case. + // index values. This filter is only supported for ObjectTypeName _asset, _case + // and _order. ObjectFilter *ObjectFilter `type:"structure"` // The name of the profile object type. @@ -8616,13 +8634,14 @@ func (s *MergeProfilesOutput) SetMessage(v string) *MergeProfilesOutput { // The filter applied to ListProfileObjects response to include profile objects // with the specified index values. This filter is only supported for ObjectTypeName -// _asset and _case. +// _asset, _case and _order. type ObjectFilter struct { _ struct{} `type:"structure"` // A searchable identifier of a standard profile object. The predefined keys // you can use to search for _asset include: _assetId, _assetName, _serialNumber. - // The predefined keys you can use to search for _case include: _caseId. + // The predefined keys you can use to search for _case include: _caseId. The + // predefined keys you can use to search for _order include: _orderId. // // KeyName is a required field KeyName *string `min:"1" type:"string" required:"true"` @@ -8760,15 +8779,15 @@ type ObjectTypeKey struct { FieldNames []*string `type:"list"` // The types of keys that a ProfileObject can have. Each ProfileObject can have - // only 1 UNIQUE key but multiple PROFILE keys. 
PROFILE, ASSET or CASE means - // that this key can be used to tie an object to a PROFILE, ASSET or CASE respectively. - // UNIQUE means that it can be used to uniquely identify an object. If a key - // a is marked as SECONDARY, it will be used to search for profiles after all - // other PROFILE keys have been searched. A LOOKUP_ONLY key is only used to - // match a profile but is not persisted to be used for searching of the profile. - // A NEW_ONLY key is only used if the profile does not already exist before - // the object is ingested, otherwise it is only used for matching objects to - // profiles. + // only 1 UNIQUE key but multiple PROFILE keys. PROFILE, ASSET, CASE, or ORDER + // means that this key can be used to tie an object to a PROFILE, ASSET, CASE, + // or ORDER respectively. UNIQUE means that it can be used to uniquely identify + // an object. If a key a is marked as SECONDARY, it will be used to search for + // profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key + // is only used to match a profile but is not persisted to be used for searching + // of the profile. A NEW_ONLY key is only used if the profile does not already + // exist before the object is ingested, otherwise it is only used for matching + // objects to profiles. StandardIdentifiers []*string `type:"list"` } @@ -9039,9 +9058,14 @@ type PutIntegrationInput struct { FlowDefinition *FlowDefinition `type:"structure"` // The name of the profile object type. - // - // ObjectTypeName is a required field - ObjectTypeName *string `min:"1" type:"string" required:"true"` + ObjectTypeName *string `min:"1" type:"string"` + + // A map in which each key is an event type from an external application such + // as Segment or Shopify, and each value is an ObjectTypeName (template) used + // to ingest the event. It supports the following event types: SegmentIdentify, + // ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, + // ShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders. + ObjectTypeNames map[string]*string `type:"map"` // The tags used to organize, track, or control access for this resource. Tags map[string]*string `min:"1" type:"map"` @@ -9077,9 +9101,6 @@ func (s *PutIntegrationInput) Validate() error { if s.DomainName != nil && len(*s.DomainName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) } - if s.ObjectTypeName == nil { - invalidParams.Add(request.NewErrParamRequired("ObjectTypeName")) - } if s.ObjectTypeName != nil && len(*s.ObjectTypeName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ObjectTypeName", 1)) } @@ -9119,6 +9140,12 @@ func (s *PutIntegrationInput) SetObjectTypeName(v string) *PutIntegrationInput { return s } +// SetObjectTypeNames sets the ObjectTypeNames field's value. +func (s *PutIntegrationInput) SetObjectTypeNames(v map[string]*string) *PutIntegrationInput { + s.ObjectTypeNames = v + return s +} + // SetTags sets the Tags field's value. func (s *PutIntegrationInput) SetTags(v map[string]*string) *PutIntegrationInput { s.Tags = v @@ -9150,9 +9177,14 @@ type PutIntegrationOutput struct { LastUpdatedAt *time.Time `type:"timestamp" required:"true"` // The name of the profile object type. 
- // - // ObjectTypeName is a required field - ObjectTypeName *string `min:"1" type:"string" required:"true"` + ObjectTypeName *string `min:"1" type:"string"` + + // A map in which each key is an event type from an external application such + // as Segment or Shopify, and each value is an ObjectTypeName (template) used + // to ingest the event. It supports the following event types: SegmentIdentify, + // ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, + // ShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders. + ObjectTypeNames map[string]*string `type:"map"` // The tags used to organize, track, or control access for this resource. Tags map[string]*string `min:"1" type:"map"` @@ -9205,6 +9237,12 @@ func (s *PutIntegrationOutput) SetObjectTypeName(v string) *PutIntegrationOutput return s } +// SetObjectTypeNames sets the ObjectTypeNames field's value. +func (s *PutIntegrationOutput) SetObjectTypeNames(v map[string]*string) *PutIntegrationOutput { + s.ObjectTypeNames = v + return s +} + // SetTags sets the Tags field's value. func (s *PutIntegrationOutput) SetTags(v map[string]*string) *PutIntegrationOutput { s.Tags = v @@ -10069,9 +10107,11 @@ type SearchProfilesInput struct { DomainName *string `location:"uri" locationName:"DomainName" min:"1" type:"string" required:"true"` // A searchable identifier of a customer profile. The predefined keys you can - // use to search include: _account, _profileId, _fullName, _phone, _email, _ctrContactId, - // _marketoLeadId, _salesforceAccountId, _salesforceContactId, _zendeskUserId, - // _zendeskExternalId, _serviceNowSystemId. + // use to search include: _account, _profileId, _assetId, _caseId, _orderId, + // _fullName, _phone, _email, _ctrContactId, _marketoLeadId, _salesforceAccountId, + // _salesforceContactId, _salesforceAssetId, _zendeskUserId, _zendeskExternalId, + // _zendeskTicketId, _serviceNowSystemId, _serviceNowIncidentId, _segmentUserId, + // _shopifyCustomerId, _shopifyOrderId. // // KeyName is a required field KeyName *string `min:"1" type:"string" required:"true"` @@ -12199,6 +12239,9 @@ const ( // StandardIdentifierNewOnly is a StandardIdentifier enum value StandardIdentifierNewOnly = "NEW_ONLY" + + // StandardIdentifierOrder is a StandardIdentifier enum value + StandardIdentifierOrder = "ORDER" ) // StandardIdentifier_Values returns all elements of the StandardIdentifier enum @@ -12211,6 +12254,7 @@ func StandardIdentifier_Values() []string { StandardIdentifierSecondary, StandardIdentifierLookupOnly, StandardIdentifierNewOnly, + StandardIdentifierOrder, } } diff --git a/service/datasync/api.go b/service/datasync/api.go index 4758828d554..870eab70037 100644 --- a/service/datasync/api.go +++ b/service/datasync/api.go @@ -286,6 +286,88 @@ func (c *DataSync) CreateLocationEfsWithContext(ctx aws.Context, input *CreateLo return out, req.Send() } +const opCreateLocationFsxLustre = "CreateLocationFsxLustre" + +// CreateLocationFsxLustreRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocationFsxLustre operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocationFsxLustre for more information on using the CreateLocationFsxLustre +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLocationFsxLustreRequest method. +// req, resp := client.CreateLocationFsxLustreRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationFsxLustre +func (c *DataSync) CreateLocationFsxLustreRequest(input *CreateLocationFsxLustreInput) (req *request.Request, output *CreateLocationFsxLustreOutput) { + op := &request.Operation{ + Name: opCreateLocationFsxLustre, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocationFsxLustreInput{} + } + + output = &CreateLocationFsxLustreOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocationFsxLustre API operation for AWS DataSync. +// +// Creates an endpoint for an Amazon FSx for Lustre file system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation CreateLocationFsxLustre for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationFsxLustre +func (c *DataSync) CreateLocationFsxLustre(input *CreateLocationFsxLustreInput) (*CreateLocationFsxLustreOutput, error) { + req, out := c.CreateLocationFsxLustreRequest(input) + return out, req.Send() +} + +// CreateLocationFsxLustreWithContext is the same as CreateLocationFsxLustre with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLocationFsxLustre for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) CreateLocationFsxLustreWithContext(ctx aws.Context, input *CreateLocationFsxLustreInput, opts ...request.Option) (*CreateLocationFsxLustreOutput, error) { + req, out := c.CreateLocationFsxLustreRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateLocationFsxWindows = "CreateLocationFsxWindows" // CreateLocationFsxWindowsRequest generates a "aws/request.Request" representing the @@ -1305,6 +1387,89 @@ func (c *DataSync) DescribeLocationEfsWithContext(ctx aws.Context, input *Descri return out, req.Send() } +const opDescribeLocationFsxLustre = "DescribeLocationFsxLustre" + +// DescribeLocationFsxLustreRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocationFsxLustre operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeLocationFsxLustre for more information on using the DescribeLocationFsxLustre +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocationFsxLustreRequest method. +// req, resp := client.DescribeLocationFsxLustreRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationFsxLustre +func (c *DataSync) DescribeLocationFsxLustreRequest(input *DescribeLocationFsxLustreInput) (req *request.Request, output *DescribeLocationFsxLustreOutput) { + op := &request.Operation{ + Name: opDescribeLocationFsxLustre, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationFsxLustreInput{} + } + + output = &DescribeLocationFsxLustreOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocationFsxLustre API operation for AWS DataSync. +// +// Returns metadata, such as the path information about an Amazon FSx for Lustre +// location. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation DescribeLocationFsxLustre for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationFsxLustre +func (c *DataSync) DescribeLocationFsxLustre(input *DescribeLocationFsxLustreInput) (*DescribeLocationFsxLustreOutput, error) { + req, out := c.DescribeLocationFsxLustreRequest(input) + return out, req.Send() +} + +// DescribeLocationFsxLustreWithContext is the same as DescribeLocationFsxLustre with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocationFsxLustre for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) DescribeLocationFsxLustreWithContext(ctx aws.Context, input *DescribeLocationFsxLustreInput, opts ...request.Option) (*DescribeLocationFsxLustreOutput, error) { + req, out := c.DescribeLocationFsxLustreRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeLocationFsxWindows = "DescribeLocationFsxWindows" // DescribeLocationFsxWindowsRequest generates a "aws/request.Request" representing the @@ -3977,6 +4142,135 @@ func (s *CreateLocationEfsOutput) SetLocationArn(v string) *CreateLocationEfsOut return s } +type CreateLocationFsxLustreInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the FSx for Lustre file system. 
+ // + // FsxFilesystemArn is a required field + FsxFilesystemArn *string `type:"string" required:"true"` + + // The Amazon Resource Names (ARNs) of the security groups that are used to + // configure the FSx for Lustre file system. + // + // SecurityGroupArns is a required field + SecurityGroupArns []*string `min:"1" type:"list" required:"true"` + + // A subdirectory in the location's path. This subdirectory in the FSx for Lustre + // file system is used to read data from the FSx for Lustre source location + // or write data to the FSx for Lustre destination. + Subdirectory *string `type:"string"` + + // The key-value pair that represents a tag that you want to add to the resource. + // The value can be an empty string. This value helps you manage, filter, and + // search for your resources. We recommend that you create a name tag for your + // location. + Tags []*TagListEntry `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationFsxLustreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationFsxLustreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLocationFsxLustreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocationFsxLustreInput"} + if s.FsxFilesystemArn == nil { + invalidParams.Add(request.NewErrParamRequired("FsxFilesystemArn")) + } + if s.SecurityGroupArns == nil { + invalidParams.Add(request.NewErrParamRequired("SecurityGroupArns")) + } + if s.SecurityGroupArns != nil && len(s.SecurityGroupArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityGroupArns", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFsxFilesystemArn sets the FsxFilesystemArn field's value. +func (s *CreateLocationFsxLustreInput) SetFsxFilesystemArn(v string) *CreateLocationFsxLustreInput { + s.FsxFilesystemArn = &v + return s +} + +// SetSecurityGroupArns sets the SecurityGroupArns field's value. +func (s *CreateLocationFsxLustreInput) SetSecurityGroupArns(v []*string) *CreateLocationFsxLustreInput { + s.SecurityGroupArns = v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. +func (s *CreateLocationFsxLustreInput) SetSubdirectory(v string) *CreateLocationFsxLustreInput { + s.Subdirectory = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLocationFsxLustreInput) SetTags(v []*TagListEntry) *CreateLocationFsxLustreInput { + s.Tags = v + return s +} + +type CreateLocationFsxLustreOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the FSx for Lustre file system location + // that's created. + LocationArn *string `type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationFsxLustreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationFsxLustreOutput) GoString() string { + return s.String() +} + +// SetLocationArn sets the LocationArn field's value. +func (s *CreateLocationFsxLustreOutput) SetLocationArn(v string) *CreateLocationFsxLustreOutput { + s.LocationArn = &v + return s +} + type CreateLocationFsxWindowsInput struct { _ struct{} `type:"structure"` @@ -3999,14 +4293,14 @@ type CreateLocationFsxWindowsInput struct { // Password is a required field Password *string `type:"string" required:"true" sensitive:"true"` - // The Amazon Resource Names (ARNs) of the security groups that are to use to + // The Amazon Resource Names (ARNs) of the security groups that are used to // configure the FSx for Windows File Server file system. // // SecurityGroupArns is a required field SecurityGroupArns []*string `min:"1" type:"list" required:"true"` - // A subdirectory in the location’s path. This subdirectory in the Amazon - // FSx for Windows File Server file system is used to read data from the Amazon + // A subdirectory in the location's path. This subdirectory in the Amazon FSx + // for Windows File Server file system is used to read data from the Amazon // FSx for Windows File Server source location or write data to the FSx for // Windows File Server destination. Subdirectory *string `type:"string"` @@ -5833,6 +6127,111 @@ func (s *DescribeLocationEfsOutput) SetLocationUri(v string) *DescribeLocationEf return s } +type DescribeLocationFsxLustreInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the FSx for Lustre location to describe. + // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationFsxLustreInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationFsxLustreInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocationFsxLustreInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocationFsxLustreInput"} + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLocationArn sets the LocationArn field's value. 
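	// A hedged usage sketch for the FSx for Lustre location operations added in
	// this file; the ARNs below are placeholders, not real resources:
	//
	//    svc := datasync.New(session.Must(session.NewSession()))
	//    out, err := svc.CreateLocationFsxLustre(&datasync.CreateLocationFsxLustreInput{
	//        FsxFilesystemArn:  aws.String("arn:aws:fsx:us-west-2:111122223333:file-system/fs-EXAMPLE"),
	//        SecurityGroupArns: aws.StringSlice([]string{"arn:aws:ec2:us-west-2:111122223333:security-group/sg-EXAMPLE"}),
	//    })
	//    if err == nil {
	//        fmt.Println(aws.StringValue(out.LocationArn))
	//    }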
+func (s *DescribeLocationFsxLustreInput) SetLocationArn(v string) *DescribeLocationFsxLustreInput { + s.LocationArn = &v + return s +} + +type DescribeLocationFsxLustreOutput struct { + _ struct{} `type:"structure"` + + // The time that the FSx for Lustre location was created. + CreationTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the FSx for Lustre location that was described. + LocationArn *string `type:"string"` + + // The URI of the FSx for Lustre location that was described. + LocationUri *string `type:"string"` + + // The Amazon Resource Names (ARNs) of the security groups that are configured + // for the FSx for Lustre file system. + SecurityGroupArns []*string `min:"1" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationFsxLustreOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationFsxLustreOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeLocationFsxLustreOutput) SetCreationTime(v time.Time) *DescribeLocationFsxLustreOutput { + s.CreationTime = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationFsxLustreOutput) SetLocationArn(v string) *DescribeLocationFsxLustreOutput { + s.LocationArn = &v + return s +} + +// SetLocationUri sets the LocationUri field's value. +func (s *DescribeLocationFsxLustreOutput) SetLocationUri(v string) *DescribeLocationFsxLustreOutput { + s.LocationUri = &v + return s +} + +// SetSecurityGroupArns sets the SecurityGroupArns field's value. +func (s *DescribeLocationFsxLustreOutput) SetSecurityGroupArns(v []*string) *DescribeLocationFsxLustreOutput { + s.SecurityGroupArns = v + return s +} + type DescribeLocationFsxWindowsInput struct { _ struct{} `type:"structure"` @@ -8054,7 +8453,7 @@ type LocationListEntry struct { // is the prefix path that you want to mount and use as the root of the location. LocationArn *string `type:"string"` - // Represents a list of URLs of a location. LocationUri returns an array that + // Represents a list of URIs of a location. LocationUri returns an array that // contains a list of locations when the ListLocations (https://docs.aws.amazon.com/datasync/latest/userguide/API_ListLocations.html) // operation is called. 
	//
diff --git a/service/datasync/datasynciface/interface.go b/service/datasync/datasynciface/interface.go
index 0804f822f9a..139aac58da2 100644
--- a/service/datasync/datasynciface/interface.go
+++ b/service/datasync/datasynciface/interface.go
@@ -72,6 +72,10 @@ type DataSyncAPI interface {
	CreateLocationEfsWithContext(aws.Context, *datasync.CreateLocationEfsInput, ...request.Option) (*datasync.CreateLocationEfsOutput, error)
	CreateLocationEfsRequest(*datasync.CreateLocationEfsInput) (*request.Request, *datasync.CreateLocationEfsOutput)

+	CreateLocationFsxLustre(*datasync.CreateLocationFsxLustreInput) (*datasync.CreateLocationFsxLustreOutput, error)
+	CreateLocationFsxLustreWithContext(aws.Context, *datasync.CreateLocationFsxLustreInput, ...request.Option) (*datasync.CreateLocationFsxLustreOutput, error)
+	CreateLocationFsxLustreRequest(*datasync.CreateLocationFsxLustreInput) (*request.Request, *datasync.CreateLocationFsxLustreOutput)
+
	CreateLocationFsxWindows(*datasync.CreateLocationFsxWindowsInput) (*datasync.CreateLocationFsxWindowsOutput, error)
	CreateLocationFsxWindowsWithContext(aws.Context, *datasync.CreateLocationFsxWindowsInput, ...request.Option) (*datasync.CreateLocationFsxWindowsOutput, error)
	CreateLocationFsxWindowsRequest(*datasync.CreateLocationFsxWindowsInput) (*request.Request, *datasync.CreateLocationFsxWindowsOutput)
@@ -120,6 +124,10 @@ type DataSyncAPI interface {
	DescribeLocationEfsWithContext(aws.Context, *datasync.DescribeLocationEfsInput, ...request.Option) (*datasync.DescribeLocationEfsOutput, error)
	DescribeLocationEfsRequest(*datasync.DescribeLocationEfsInput) (*request.Request, *datasync.DescribeLocationEfsOutput)

+	DescribeLocationFsxLustre(*datasync.DescribeLocationFsxLustreInput) (*datasync.DescribeLocationFsxLustreOutput, error)
+	DescribeLocationFsxLustreWithContext(aws.Context, *datasync.DescribeLocationFsxLustreInput, ...request.Option) (*datasync.DescribeLocationFsxLustreOutput, error)
+	DescribeLocationFsxLustreRequest(*datasync.DescribeLocationFsxLustreInput) (*request.Request, *datasync.DescribeLocationFsxLustreOutput)
+
	DescribeLocationFsxWindows(*datasync.DescribeLocationFsxWindowsInput) (*datasync.DescribeLocationFsxWindowsOutput, error)
	DescribeLocationFsxWindowsWithContext(aws.Context, *datasync.DescribeLocationFsxWindowsInput, ...request.Option) (*datasync.DescribeLocationFsxWindowsOutput, error)
	DescribeLocationFsxWindowsRequest(*datasync.DescribeLocationFsxWindowsInput) (*request.Request, *datasync.DescribeLocationFsxWindowsOutput)
diff --git a/service/devopsguru/api.go b/service/devopsguru/api.go
index 48e0e25f2dd..c2027a5a47d 100644
--- a/service/devopsguru/api.go
+++ b/service/devopsguru/api.go
@@ -5077,6 +5077,34 @@ type DescribeOrganizationResourceCollectionHealthOutput struct {
	// An array of ServiceHealth objects that describes the health of the Amazon
	// Web Services services associated with the resources in the collection.
	Service []*ServiceHealth `type:"list"`
+
+	// Tags help you identify and organize your Amazon Web Services resources. Many
+	// Amazon Web Services services support tagging, so you can assign the same
+	// tag to resources from different services to indicate that the resources are
+	// related. For example, you can assign the same tag to an Amazon DynamoDB table
+	// resource that you assign to a Lambda function. For more information about
+	// using tags, see the Tagging best practices (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)
+	// whitepaper.
+	//
+	// Each Amazon Web Services tag has two parts.
+	//
+	//    * A tag key (for example, CostCenter, Environment, Project, or Secret).
+	//    Tag keys are case-sensitive.
+	//
+	//    * An optional field known as a tag value (for example, 111122223333, Production,
+	//    or a team name). Omitting the tag value is the same as using an empty
+	//    string. Like tag keys, tag values are case-sensitive.
+	//
+	// Together these are known as key-value pairs.
+	//
+	// The string used for a key in a tag that you use to define your resource coverage
+	// must begin with the prefix Devops-guru-. The tag key might be Devops-guru-deployment-application
+	// or Devops-guru-rds-application. While keys are case-sensitive, the case of
+	// key characters doesn't matter to DevOps Guru. For example, DevOps Guru works
+	// with a key named devops-guru-rds and a key named DevOps-Guru-RDS. Possible
+	// key/value pairs in your application might be Devops-Guru-production-application/RDS
+	// or Devops-Guru-production-application/containers.
+	Tags []*TagHealth `type:"list"`
 }

 // String returns the string representation.
@@ -5121,6 +5149,12 @@ func (s *DescribeOrganizationResourceCollectionHealthOutput) SetService(v []*Ser
	return s
 }

+// SetTags sets the Tags field's value.
+func (s *DescribeOrganizationResourceCollectionHealthOutput) SetTags(v []*TagHealth) *DescribeOrganizationResourceCollectionHealthOutput {
+	s.Tags = v
+	return s
+}
+
 type DescribeResourceCollectionHealthInput struct {
	_ struct{} `type:"structure" nopayload:"true"`

@@ -12188,6 +12222,9 @@ const (

	// OrganizationResourceCollectionTypeAwsAccount is a OrganizationResourceCollectionType enum value
	OrganizationResourceCollectionTypeAwsAccount = "AWS_ACCOUNT"
+
+	// OrganizationResourceCollectionTypeAwsTags is a OrganizationResourceCollectionType enum value
+	OrganizationResourceCollectionTypeAwsTags = "AWS_TAGS"
 )

 // OrganizationResourceCollectionType_Values returns all elements of the OrganizationResourceCollectionType enum
@@ -12196,6 +12233,7 @@ func OrganizationResourceCollectionType_Values() []string {
		OrganizationResourceCollectionTypeAwsCloudFormation,
		OrganizationResourceCollectionTypeAwsService,
		OrganizationResourceCollectionTypeAwsAccount,
+		OrganizationResourceCollectionTypeAwsTags,
	}
 }

diff --git a/service/finspacedata/api.go b/service/finspacedata/api.go
index 8af8dfc6fd9..7e1a66fd7d4 100644
--- a/service/finspacedata/api.go
+++ b/service/finspacedata/api.go
@@ -1631,6 +1631,9 @@ func (s *ChangesetErrorInfo) SetErrorMessage(v string) *ChangesetErrorInfo {
 type ChangesetSummary struct {
	_ struct{} `type:"structure"`

+	// Milliseconds since UTC epoch
+	ActiveFromTimestamp *int64 `locationName:"activeFromTimestamp" type:"long"`
+
	// Time until which the Changeset is active. The value is determined as Epoch
	// time in milliseconds. For example, the value for Monday, November 1, 2021
	// 12:00:00 PM UTC is specified as 1635768000000.
@@ -1710,6 +1713,12 @@ func (s ChangesetSummary) GoString() string {
	return s.String()
 }

+// SetActiveFromTimestamp sets the ActiveFromTimestamp field's value.
+func (s *ChangesetSummary) SetActiveFromTimestamp(v int64) *ChangesetSummary {
+	s.ActiveFromTimestamp = &v
+	return s
+}
+
 // SetActiveUntilTimestamp sets the ActiveUntilTimestamp field's value.
 func (s *ChangesetSummary) SetActiveUntilTimestamp(v int64) *ChangesetSummary {
	s.ActiveUntilTimestamp = &v
@@ -2238,17 +2247,13 @@ type CreateDatasetInput struct {
	_ struct{} `type:"structure"`

	// The unique resource identifier for a Dataset.
- // - // Alias is a required field - Alias *string `locationName:"alias" min:"1" type:"string" required:"true"` + Alias *string `locationName:"alias" min:"1" type:"string"` // A token used to ensure idempotency. ClientToken *string `locationName:"clientToken" min:"1" type:"string" idempotencyToken:"true"` // Description of a Dataset. - // - // DatasetDescription is a required field - DatasetDescription *string `locationName:"datasetDescription" min:"1" type:"string" required:"true"` + DatasetDescription *string `locationName:"datasetDescription" min:"1" type:"string"` // Display title for a FinSpace Dataset. // @@ -2297,18 +2302,12 @@ func (s CreateDatasetInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateDatasetInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateDatasetInput"} - if s.Alias == nil { - invalidParams.Add(request.NewErrParamRequired("Alias")) - } if s.Alias != nil && len(*s.Alias) < 1 { invalidParams.Add(request.NewErrParamMinLen("Alias", 1)) } if s.ClientToken != nil && len(*s.ClientToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) } - if s.DatasetDescription == nil { - invalidParams.Add(request.NewErrParamRequired("DatasetDescription")) - } if s.DatasetDescription != nil && len(*s.DatasetDescription) < 1 { invalidParams.Add(request.NewErrParamMinLen("DatasetDescription", 1)) } @@ -2481,6 +2480,11 @@ type DataViewDestinationTypeParams struct { // // DestinationType is a required field DestinationType *string `locationName:"destinationType" type:"string" required:"true"` + + // Data View Export File Format + S3DestinationExportFileFormat *string `locationName:"s3DestinationExportFileFormat" type:"string" enum:"ExportFileFormat"` + + S3DestinationExportFileFormatOptions map[string]*string `locationName:"s3DestinationExportFileFormatOptions" type:"map"` } // String returns the string representation. @@ -2520,6 +2524,18 @@ func (s *DataViewDestinationTypeParams) SetDestinationType(v string) *DataViewDe return s } +// SetS3DestinationExportFileFormat sets the S3DestinationExportFileFormat field's value. +func (s *DataViewDestinationTypeParams) SetS3DestinationExportFileFormat(v string) *DataViewDestinationTypeParams { + s.S3DestinationExportFileFormat = &v + return s +} + +// SetS3DestinationExportFileFormatOptions sets the S3DestinationExportFileFormatOptions field's value. +func (s *DataViewDestinationTypeParams) SetS3DestinationExportFileFormatOptions(v map[string]*string) *DataViewDestinationTypeParams { + s.S3DestinationExportFileFormatOptions = v + return s +} + // The structure with error messages. type DataViewErrorInfo struct { _ struct{} `type:"structure"` @@ -3088,6 +3104,9 @@ func (s *GetChangesetInput) SetDatasetId(v string) *GetChangesetInput { type GetChangesetOutput struct { _ struct{} `type:"structure"` + // Milliseconds since UTC epoch + ActiveFromTimestamp *int64 `locationName:"activeFromTimestamp" type:"long"` + // Time until which the Changeset is active. The value is determined as Epoch // time in milliseconds. For example, the value for Monday, November 1, 2021 // 12:00:00 PM UTC is specified as 1635768000000. @@ -3156,6 +3175,12 @@ func (s GetChangesetOutput) GoString() string { return s.String() } +// SetActiveFromTimestamp sets the ActiveFromTimestamp field's value. 
+func (s *GetChangesetOutput) SetActiveFromTimestamp(v int64) *GetChangesetOutput { + s.ActiveFromTimestamp = &v + return s +} + // SetActiveUntilTimestamp sets the ActiveUntilTimestamp field's value. func (s *GetChangesetOutput) SetActiveUntilTimestamp(v int64) *GetChangesetOutput { s.ActiveUntilTimestamp = &v @@ -4734,9 +4759,7 @@ type UpdateDatasetInput struct { _ struct{} `type:"structure"` // The unique resource identifier for a Dataset. - // - // Alias is a required field - Alias *string `locationName:"alias" min:"1" type:"string" required:"true"` + Alias *string `locationName:"alias" min:"1" type:"string"` // A token used to ensure idempotency. ClientToken *string `locationName:"clientToken" min:"1" type:"string" idempotencyToken:"true"` @@ -4788,9 +4811,6 @@ func (s UpdateDatasetInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateDatasetInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateDatasetInput"} - if s.Alias == nil { - invalidParams.Add(request.NewErrParamRequired("Alias")) - } if s.Alias != nil && len(*s.Alias) < 1 { invalidParams.Add(request.NewErrParamMinLen("Alias", 1)) } @@ -5162,6 +5182,23 @@ func ErrorCategory_Values() []string { } } +// Data View Export File Format +const ( + // ExportFileFormatParquet is a ExportFileFormat enum value + ExportFileFormatParquet = "PARQUET" + + // ExportFileFormatDelimitedText is a ExportFileFormat enum value + ExportFileFormatDelimitedText = "DELIMITED_TEXT" +) + +// ExportFileFormat_Values returns all elements of the ExportFileFormat enum +func ExportFileFormat_Values() []string { + return []string{ + ExportFileFormatParquet, + ExportFileFormatDelimitedText, + } +} + // Status of the ingestion process returned from scheduler service. const ( // IngestionStatusPending is a IngestionStatus enum value diff --git a/service/forecastservice/api.go b/service/forecastservice/api.go index ae842fe021b..6a8ef718659 100644 --- a/service/forecastservice/api.go +++ b/service/forecastservice/api.go @@ -560,7 +560,7 @@ func (c *ForecastService) CreateExplainabilityRequest(input *CreateExplainabilit // // CreateExplainability with a Forecast ARN // -// You can specify a maximum of 50 time series and 1500 time points. +// You can specify a maximum of 50 time series and 500 time points. // // The following parameters are required when providing a Predictor ARN: // @@ -1149,9 +1149,9 @@ func (c *ForecastService) CreatePredictorBacktestExportJobRequest(input *CreateP // CreatePredictorBacktestExportJob API operation for Amazon Forecast Service. // -// Exports backtest forecasts and accuracy metrics generated by the CreatePredictor -// operation. Two folders containing CSV files are exported to your specified -// S3 bucket. +// Exports backtest forecasts and accuracy metrics generated by the CreateAutoPredictor +// or CreatePredictor operations. Two folders containing CSV files are exported +// to your specified S3 bucket. // // The export file names will match the following conventions: // @@ -1626,7 +1626,7 @@ func (c *ForecastService) DeleteExplainabilityExportRequest(input *DeleteExplain // DeleteExplainabilityExport API operation for Amazon Forecast Service. // -// Deletes an Explainability export job. +// Deletes an Explainability export. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1897,9 +1897,9 @@ func (c *ForecastService) DeletePredictorRequest(input *DeletePredictorInput) (r

 // DeletePredictor API operation for Amazon Forecast Service.
 //
-// Deletes a predictor created using the CreatePredictor operation. You can
-// delete only predictor that have a status of ACTIVE or CREATE_FAILED. To get
-// the status, use the DescribePredictor operation.
+// Deletes a predictor created using the CreateAutoPredictor or CreatePredictor
+// operations. You can delete only predictors that have a status of ACTIVE or
+// CREATE_FAILED. To get the status, use the DescribePredictor operation.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -2920,8 +2920,6 @@ func (c *ForecastService) DescribePredictorRequest(input *DescribePredictorInput
 // This operation is only valid for legacy predictors created with CreatePredictor.
 // If you are not using a legacy predictor, use DescribeAutoPredictor.
 //
-// To upgrade a legacy predictor to AutoPredictor, see Upgrading to AutoPredictor.
-//
 // Describes a predictor created using the CreatePredictor operation.
 //
 // In addition to listing the properties provided in the CreatePredictor request,
@@ -4268,10 +4266,12 @@ func (c *ForecastService) ListPredictorsRequest(input *ListPredictorsInput) (req

 // ListPredictors API operation for Amazon Forecast Service.
 //
-// Returns a list of predictors created using the CreatePredictor operation.
-// For each predictor, this operation returns a summary of its properties, including
-// its Amazon Resource Name (ARN). You can retrieve the complete set of properties
-// by using the ARN with the DescribePredictor operation. You can filter the
+// Returns a list of predictors created using the CreateAutoPredictor or CreatePredictor
+// operations. For each predictor, this operation returns a summary of its properties,
+// including its Amazon Resource Name (ARN).
+//
+// You can retrieve the complete set of properties by using the ARN with the
+// DescribeAutoPredictor and DescribePredictor operations. You can filter the
 // list using an array of Filter objects.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -4510,6 +4510,10 @@ func (c *ForecastService) StopResourceRequest(input *StopResourceInput) (req *re
 //
 //    * Predictor Backtest Export Job
 //
+//    * Explainability Job
+//
+//    * Explainability Export Job
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -4845,8 +4849,8 @@ type AdditionalDataset struct {
	//
	// Holidays
	//
-	// To enable Holidays, specify a country with one of the following two-letter
-	// country codes:
+	// To enable Holidays, set CountryCode to one of the following two-letter country
+	// codes:
	//
	//    * "AL" - ALBANIA
	//
@@ -5337,6 +5341,7 @@ type CreateAutoPredictorInput struct {
	// this optional object in the CreateDataset and CreatePredictor requests.
	EncryptionConfig *EncryptionConfig `type:"structure"`

+	// Create an Explainability resource for the predictor.
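+	//
+	// A minimal sketch of opting in when creating an auto predictor (this assumes
+	// an already-configured *forecastservice.ForecastService client named svc;
+	// the name is illustrative, and other required configuration is omitted):
+	//
+	//    out, err := svc.CreateAutoPredictor(&forecastservice.CreateAutoPredictorInput{
+	//        PredictorName:    aws.String("demo-predictor"),
+	//        ExplainPredictor: aws.Bool(true),
+	//    })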
ExplainPredictor *bool `type:"boolean"` // An array of dimension (field) names that specify how to group the generated @@ -6344,10 +6349,9 @@ func (s *CreateExplainabilityExportOutput) SetExplainabilityExportArn(v string) type CreateExplainabilityInput struct { _ struct{} `type:"structure"` - // The source of your training data, an AWS Identity and Access Management (IAM) - // role that allows Amazon Forecast to access the data and, optionally, an AWS - // Key Management Service (KMS) key. This object is submitted in the CreateDatasetImportJob - // request. + // The source of your data, an AWS Identity and Access Management (IAM) role + // that allows Amazon Forecast to access the data and, optionally, an AWS Key + // Management Service (KMS) key. DataSource *DataSource `type:"structure"` // Create an Expainability visualization that is viewable within the AWS console. @@ -6355,6 +6359,8 @@ type CreateExplainabilityInput struct { // If TimePointGranularity is set to SPECIFIC, define the last time point for // the Explainability. + // + // Use the following timestamp format: yyyy-MM-ddTHH:mm:ss (example: 2015-01-01T20:00:00) EndDateTime *string `type:"string"` // The configuration settings that define the granularity of time series and @@ -6374,12 +6380,13 @@ type CreateExplainabilityInput struct { // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` - // Defines the fields of a dataset. You specify this object in the CreateDataset - // request. + // Defines the fields of a dataset. Schema *Schema `type:"structure"` // If TimePointGranularity is set to SPECIFIC, define the first point for the // Explainability. + // + // Use the following timestamp format: yyyy-MM-ddTHH:mm:ss (example: 2015-01-01T20:00:00) StartDateTime *string `type:"string"` // Optional metadata to help you categorize and organize your resources. Each @@ -7545,15 +7552,14 @@ func (s *DataDestination) SetS3Config(v *S3Config) *DataDestination { return s } -// The source of your training data, an AWS Identity and Access Management (IAM) -// role that allows Amazon Forecast to access the data and, optionally, an AWS -// Key Management Service (KMS) key. This object is submitted in the CreateDatasetImportJob -// request. +// The source of your data, an AWS Identity and Access Management (IAM) role +// that allows Amazon Forecast to access the data and, optionally, an AWS Key +// Management Service (KMS) key. type DataSource struct { _ struct{} `type:"structure"` - // The path to the training data stored in an Amazon Simple Storage Service - // (Amazon S3) bucket along with the credentials to access the data. + // The path to the data stored in an Amazon Simple Storage Service (Amazon S3) + // bucket along with the credentials to access the data. // // S3Config is a required field S3Config *S3Config `type:"structure" required:"true"` @@ -8607,8 +8613,13 @@ type DescribeAutoPredictorOutput struct { // complete. EstimatedTimeRemainingInMinutes *int64 `type:"long"` + // Provides the status and ARN of the Predictor Explainability. ExplainabilityInfo *ExplainabilityInfo `type:"structure"` + // An array of dimension (field) names that specify the attributes used to group + // your time series. + ForecastDimensions []*string `min:"1" type:"list"` + // The frequency of predictions in a forecast. 
// // Valid intervals are Y (Year), M (Month), W (Week), D (Day), H (Hour), 30min @@ -8720,6 +8731,12 @@ func (s *DescribeAutoPredictorOutput) SetExplainabilityInfo(v *ExplainabilityInf return s } +// SetForecastDimensions sets the ForecastDimensions field's value. +func (s *DescribeAutoPredictorOutput) SetForecastDimensions(v []*string) *DescribeAutoPredictorOutput { + s.ForecastDimensions = v + return s +} + // SetForecastFrequency sets the ForecastFrequency field's value. func (s *DescribeAutoPredictorOutput) SetForecastFrequency(v string) *DescribeAutoPredictorOutput { s.ForecastFrequency = &v @@ -9558,10 +9575,9 @@ type DescribeExplainabilityOutput struct { // When the Explainability resource was created. CreationTime *time.Time `type:"timestamp"` - // The source of your training data, an AWS Identity and Access Management (IAM) - // role that allows Amazon Forecast to access the data and, optionally, an AWS - // Key Management Service (KMS) key. This object is submitted in the CreateDatasetImportJob - // request. + // The source of your data, an AWS Identity and Access Management (IAM) role + // that allows Amazon Forecast to access the data and, optionally, an AWS Key + // Management Service (KMS) key. DataSource *DataSource `type:"structure"` // Whether the visualization was enabled for the Explainability resource. @@ -9605,8 +9621,7 @@ type DescribeExplainabilityOutput struct { // the Explainability resource. ResourceArn *string `type:"string"` - // Defines the fields of a dataset. You specify this object in the CreateDataset - // request. + // Defines the fields of a dataset. Schema *Schema `type:"structure"` // If TimePointGranularity is set to SPECIFIC, the first time point in the Explainability. @@ -10979,11 +10994,22 @@ func (s *ExplainabilityExportSummary) SetStatus(v string) *ExplainabilityExportS return s } +// Provides information about the Explainability resource. type ExplainabilityInfo struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the Explainability. ExplainabilityArn *string `type:"string"` + // The status of the Explainability. States include: + // + // * ACTIVE + // + // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // + // * CREATE_STOPPING, CREATE_STOPPED + // + // * DELETE_PENDING, DELETE_IN_PROGRESS, DELETE_FAILED Status *string `type:"string"` } @@ -11136,6 +11162,10 @@ func (s *ExplainabilitySummary) SetStatus(v string) *ExplainabilitySummary { return s } +// +// This object belongs to the CreatePredictor operation. If you created your +// predictor with CreateAutoPredictor, see AttributeConfig. +// // Provides featurization (transformation) information for a dataset field. // This object is part of the FeaturizationConfig object. // @@ -11230,6 +11260,10 @@ func (s *Featurization) SetFeaturizationPipeline(v []*FeaturizationMethod) *Feat return s } +// +// This object belongs to the CreatePredictor operation. If you created your +// predictor with CreateAutoPredictor, see AttributeConfig. +// // In a CreatePredictor operation, the specified algorithm trains a model using // the specified dataset group. You can optionally tell the operation to modify // data fields prior to training a model. These modifications are referred to @@ -11646,6 +11680,7 @@ func (s *ForecastExportJobSummary) SetStatus(v string) *ForecastExportJobSummary type ForecastSummary struct { _ struct{} `type:"structure"` + // Whether the Forecast was created from an AutoPredictor. 
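+	//
+	// For example, a sketch that branches on this flag for each ForecastSummary
+	// returned by ListForecasts (out and the branch bodies are illustrative):
+	//
+	//    for _, f := range out.Forecasts {
+	//        if aws.BoolValue(f.CreatedUsingAutoPredictor) {
+	//            // the predictor can be inspected with DescribeAutoPredictor
+	//        } else {
+	//            // the predictor can be inspected with DescribePredictor
+	//        }
+	//    }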
CreatedUsingAutoPredictor *bool `type:"boolean"` // When the forecast creation task was created. @@ -11939,6 +11974,10 @@ func (s *HyperParameterTuningJobConfig) SetParameterRanges(v *ParameterRanges) * return s } +// +// This object belongs to the CreatePredictor operation. If you created your +// predictor with CreateAutoPredictor, see DataConfig. +// // The data used to train a predictor. The data includes a dataset group and // any supplementary features. You specify this object in the CreatePredictor // request. @@ -12667,7 +12706,7 @@ type ListExplainabilitiesInput struct { // // * Condition - The condition to apply. Valid values are IS and IS_NOT. // - // * Key - The name of the parameter to filter on. Valid values are PredictorArn + // * Key - The name of the parameter to filter on. Valid values are ResourceArn // and Status. // // * Value - The value to match. @@ -12798,7 +12837,7 @@ type ListExplainabilityExportsInput struct { // // * Condition - The condition to apply. Valid values are IS and IS_NOT. // - // * Key - The name of the parameter to filter on. Valid values are PredictorArn + // * Key - The name of the parameter to filter on. Valid values are ResourceArn // and Status. // // * Value - The value to match. @@ -13467,9 +13506,7 @@ type ListTagsForResourceInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that identifies the resource for which to - // list the tags. Currently, the supported resources are Forecast dataset groups, - // datasets, dataset import jobs, predictors, forecasts, and forecast export - // jobs. + // list the tags. // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` @@ -14348,8 +14385,7 @@ func (s *S3Config) SetRoleArn(v string) *S3Config { return s } -// Defines the fields of a dataset. You specify this object in the CreateDataset -// request. +// Defines the fields of a dataset. type Schema struct { _ struct{} `type:"structure"` @@ -14605,7 +14641,7 @@ type StopResourceInput struct { // The Amazon Resource Name (ARN) that identifies the resource to stop. The // supported ARNs are DatasetImportJobArn, PredictorArn, PredictorBacktestExportJobArn, - // ForecastArn, and ForecastExportJobArn. + // ForecastArn, ForecastExportJobArn, ExplainabilityArn, and ExplainabilityExportArn. // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` @@ -14670,6 +14706,10 @@ func (s StopResourceOutput) GoString() string { return s.String() } +// +// This object belongs to the CreatePredictor operation. If you created your +// predictor with CreateAutoPredictor, see AdditionalDataset. +// // Describes a supplementary feature of a dataset group. This object is part // of the InputDataConfig object. Forecast supports the Weather Index and Holidays // built-in featurizations. @@ -14997,9 +15037,7 @@ type TagResourceInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that identifies the resource for which to - // list the tags. Currently, the supported resources are Forecast dataset groups, - // datasets, dataset import jobs, predictors, forecasts, and forecast export - // jobs. + // list the tags. // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` @@ -15184,8 +15222,7 @@ type UntagResourceInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that identifies the resource for which to - // list the tags. 
Currently, the supported resources are Forecast dataset groups, - // datasets, dataset import jobs, predictors, forecasts, and forecast exports. + // list the tags. // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` diff --git a/service/imagebuilder/api.go b/service/imagebuilder/api.go index 06eff4f3f71..2e9c4929197 100644 --- a/service/imagebuilder/api.go +++ b/service/imagebuilder/api.go @@ -2785,6 +2785,101 @@ func (c *Imagebuilder) ImportComponentWithContext(ctx aws.Context, input *Import return out, req.Send() } +const opImportVmImage = "ImportVmImage" + +// ImportVmImageRequest generates a "aws/request.Request" representing the +// client's request for the ImportVmImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportVmImage for more information on using the ImportVmImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportVmImageRequest method. +// req, resp := client.ImportVmImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/imagebuilder-2019-12-02/ImportVmImage +func (c *Imagebuilder) ImportVmImageRequest(input *ImportVmImageInput) (req *request.Request, output *ImportVmImageOutput) { + op := &request.Operation{ + Name: opImportVmImage, + HTTPMethod: "PUT", + HTTPPath: "/ImportVmImage", + } + + if input == nil { + input = &ImportVmImageInput{} + } + + output = &ImportVmImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportVmImage API operation for EC2 Image Builder. +// +// When you export your virtual machine (VM) from its virtualization environment, +// that process creates a set of one or more disk container files that act as +// snapshots of your VM’s environment, settings, and data. The Amazon EC2 +// API ImportImage (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportImage.html) +// action uses those files to import your VM and create an AMI. To import using +// the CLI command, see import-image (https://docs.aws.amazon.com/cli/latest/reference/ec2/import-image.html) +// +// You can reference the task ID from the VM import to pull in the AMI that +// the import created as the base image for your Image Builder recipe. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for EC2 Image Builder's +// API operation ImportVmImage for usage and error information. +// +// Returned Error Types: +// * ServiceException +// This exception is thrown when the service encounters an unrecoverable exception. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an invalid resource identifier. +// +// * ServiceUnavailableException +// The service is unable to process your request at this time. 
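+//
+// Example sending a request using the ImportVmImage method, as a minimal,
+// illustrative sketch (the field values are placeholders, the VmImportTaskId
+// field is assumed from the task-ID reference above, and error handling is
+// elided):
+//
+//    resp, err := client.ImportVmImage(&imagebuilder.ImportVmImageInput{
+//        Name:            aws.String("imported-base-image"),
+//        SemanticVersion: aws.String("1.0.0"),
+//        Platform:        aws.String("Linux"),
+//        VmImportTaskId:  aws.String("import-ami-0123456789abcdef0"),
+//    })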
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/imagebuilder-2019-12-02/ImportVmImage +func (c *Imagebuilder) ImportVmImage(input *ImportVmImageInput) (*ImportVmImageOutput, error) { + req, out := c.ImportVmImageRequest(input) + return out, req.Send() +} + +// ImportVmImageWithContext is the same as ImportVmImage with the addition of +// the ability to pass a context and additional request options. +// +// See ImportVmImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Imagebuilder) ImportVmImageWithContext(ctx aws.Context, input *ImportVmImageInput, opts ...request.Option) (*ImportVmImageOutput, error) { + req, out := c.ImportVmImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListComponentBuildVersions = "ListComponentBuildVersions" // ListComponentBuildVersionsRequest generates a "aws/request.Request" representing the @@ -5971,7 +6066,9 @@ func (s *CallRateLimitExceededException) RequestID() string { type CancelImageCreationInput struct { _ struct{} `type:"structure"` - // The idempotency token used to make this request idempotent. + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html) + // in the Amazon EC2 API Reference. ClientToken *string `locationName:"clientToken" min:"1" type:"string" idempotencyToken:"true"` // The Amazon Resource Name (ARN) of the image whose creation you want to cancel. @@ -6029,7 +6126,7 @@ func (s *CancelImageCreationInput) SetImageBuildVersionArn(v string) *CancelImag type CancelImageCreationOutput struct { _ struct{} `type:"structure"` - // The idempotency token used to make this request idempotent. + // The idempotency token that was used for this request. ClientToken *string `locationName:"clientToken" min:"1" type:"string"` // The Amazon Resource Name (ARN) of the image whose creation has been cancelled. @@ -9501,6 +9598,10 @@ type Distribution struct { // // Region is a required field Region *string `locationName:"region" min:"1" type:"string" required:"true"` + + // Configure export settings to deliver disk images created from your image + // build, using a file format that is compatible with your VMs in that Region. + S3ExportConfiguration *S3ExportConfiguration `locationName:"s3ExportConfiguration" type:"structure"` } // String returns the string representation. @@ -9556,6 +9657,11 @@ func (s *Distribution) Validate() error { } } } + if s.S3ExportConfiguration != nil { + if err := s.S3ExportConfiguration.Validate(); err != nil { + invalidParams.AddNested("S3ExportConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9593,6 +9699,12 @@ func (s *Distribution) SetRegion(v string) *Distribution { return s } +// SetS3ExportConfiguration sets the S3ExportConfiguration field's value. +func (s *Distribution) SetS3ExportConfiguration(v *S3ExportConfiguration) *Distribution { + s.S3ExportConfiguration = v + return s +} + // A distribution configuration. 
type DistributionConfiguration struct {
	_ struct{} `type:"structure"`
@@ -11057,6 +11169,18 @@ type Image struct {
	// a specific version of an object.
	Arn *string `locationName:"arn" type:"string"`

+	// Indicates the type of build that created this image. The build can be initiated
+	// in the following ways:
+	//
+	//    * USER_INITIATED – A manual pipeline build request.
+	//
+	//    * SCHEDULED – A pipeline build initiated by a cron expression in the
+	//    Image Builder pipeline, or from EventBridge.
+	//
+	//    * IMPORT – A VM import created the image to use as the base image for
+	//    the recipe.
+	BuildType *string `locationName:"buildType" type:"string" enum:"BuildType"`
+
	// The recipe that is used to create an Image Builder container image.
	ContainerRecipe *ContainerRecipe `locationName:"containerRecipe" type:"structure"`
@@ -11154,6 +11278,12 @@ func (s *Image) SetArn(v string) *Image {
	return s
 }

+// SetBuildType sets the BuildType field's value.
+func (s *Image) SetBuildType(v string) *Image {
+	s.BuildType = &v
+	return s
+}
+
 // SetContainerRecipe sets the ContainerRecipe field's value.
 func (s *Image) SetContainerRecipe(v *ContainerRecipe) *Image {
	s.ContainerRecipe = v
@@ -11767,6 +11897,18 @@ type ImageSummary struct {
	// The Amazon Resource Name (ARN) of the image.
	Arn *string `locationName:"arn" type:"string"`

+	// Indicates the type of build that created this image. The build can be initiated
+	// in the following ways:
+	//
+	//    * USER_INITIATED – A manual pipeline build request.
+	//
+	//    * SCHEDULED – A pipeline build initiated by a cron expression in the
+	//    Image Builder pipeline, or from EventBridge.
+	//
+	//    * IMPORT – A VM import created the image to use as the base image for
+	//    the recipe.
+	BuildType *string `locationName:"buildType" type:"string" enum:"BuildType"`
+
	// The date on which this image was created.
	DateCreated *string `locationName:"dateCreated" type:"string"`
@@ -11823,6 +11965,12 @@ func (s *ImageSummary) SetArn(v string) *ImageSummary {
	return s
 }

+// SetBuildType sets the BuildType field's value.
+func (s *ImageSummary) SetBuildType(v string) *ImageSummary {
+	s.BuildType = &v
+	return s
+}
+
 // SetDateCreated sets the DateCreated field's value.
 func (s *ImageSummary) SetDateCreated(v string) *ImageSummary {
	s.DateCreated = &v
@@ -11883,11 +12031,14 @@ func (s *ImageSummary) SetVersion(v string) *ImageSummary {
	return s
 }

-// Image tests configuration.
+// Configure image tests for your pipeline build. Tests run after building the
+// image, to verify that the AMI or container image is valid before distributing
+// it.
 type ImageTestsConfiguration struct {
	_ struct{} `type:"structure"`

-	// Defines if tests should be executed when building this image.
+	// Determines if tests should run after building the image. By default, Image
+	// Builder runs tests following the image build and before image distribution.
	ImageTestsEnabled *bool `locationName:"imageTestsEnabled" type:"boolean"`

	// The maximum time in minutes that tests are permitted to run.
@@ -11957,6 +12108,18 @@ type ImageVersion struct {
	// a specific version of an object.
	Arn *string `locationName:"arn" type:"string"`

+	// Indicates the type of build that created this image. The build can be initiated
+	// in the following ways:
+	//
+	//    * USER_INITIATED – A manual pipeline build request.
+	//
+	//    * SCHEDULED – A pipeline build initiated by a cron expression in the
+	//    Image Builder pipeline, or from EventBridge.
+ // + // * IMPORT – A VM import created the image to use as the base image for + // the recipe. + BuildType *string `locationName:"buildType" type:"string" enum:"BuildType"` + // The date on which this specific version of the Image Builder image was created. DateCreated *string `locationName:"dateCreated" type:"string"` @@ -12022,6 +12185,12 @@ func (s *ImageVersion) SetArn(v string) *ImageVersion { return s } +// SetBuildType sets the BuildType field's value. +func (s *ImageVersion) SetBuildType(v string) *ImageVersion { + s.BuildType = &v + return s +} + // SetDateCreated sets the DateCreated field's value. func (s *ImageVersion) SetDateCreated(v string) *ImageVersion { s.DateCreated = &v @@ -12312,6 +12481,217 @@ func (s *ImportComponentOutput) SetRequestId(v string) *ImportComponentOutput { return s } +type ImportVmImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier you provide to ensure idempotency of the + // request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html) + // in the Amazon EC2 API Reference. + ClientToken *string `locationName:"clientToken" min:"1" type:"string" idempotencyToken:"true"` + + // The description for the base image that is created by the import process. + Description *string `locationName:"description" min:"1" type:"string"` + + // The name of the base image that is created by the import process. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // The operating system version for the imported VM. + OsVersion *string `locationName:"osVersion" min:"1" type:"string"` + + // The operating system platform for the imported VM. + // + // Platform is a required field + Platform *string `locationName:"platform" type:"string" required:"true" enum:"Platform"` + + // The semantic version to attach to the base image that was created during + // the import process. This version follows the semantic version syntax. + // + // The semantic version has four nodes: