diff --git a/clients/client-appflow/models/models_0.ts b/clients/client-appflow/models/models_0.ts index 37d4af07ab91..b7c36e378124 100644 --- a/clients/client-appflow/models/models_0.ts +++ b/clients/client-appflow/models/models_0.ts @@ -353,6 +353,19 @@ export namespace TrendmicroMetadata { }); } +/** + *
+ * The connector metadata specific to Upsolver. + *
+ */ +export interface UpsolverMetadata {} + +export namespace UpsolverMetadata { + export const filterSensitiveLog = (obj: UpsolverMetadata): any => ({ + ...obj, + }); +} + /** ** The connector metadata specific to Veeva. @@ -394,122 +407,129 @@ export namespace ZendeskMetadata { export interface ConnectorMetadata { /** *
- * The connector metadata specific to Amazon Redshift. + * The connector metadata specific to Amplitude. *
*/ - Redshift?: RedshiftMetadata; + Amplitude?: AmplitudeMetadata; /** *- * The connector metadata specific to Marketo. + * The connector metadata specific to Datadog. *
*/ - Marketo?: MarketoMetadata; + Datadog?: DatadogMetadata; /** *- * The connector metadata specific to Infor Nexus. + * The connector metadata specific to Dynatrace. *
*/ - InforNexus?: InforNexusMetadata; + Dynatrace?: DynatraceMetadata; /** *- * The connector metadata specific to Zendesk. + * The connector metadata specific to Google Analytics. *
*/ - Zendesk?: ZendeskMetadata; + GoogleAnalytics?: GoogleAnalyticsMetadata; /** *- * The connector metadata specific to Dynatrace. + * The connector metadata specific to Infor Nexus. *
*/ - Dynatrace?: DynatraceMetadata; + InforNexus?: InforNexusMetadata; /** *- * The connector metadata specific to Snowflake. + * The connector metadata specific to Marketo. *
*/ - Snowflake?: SnowflakeMetadata; + Marketo?: MarketoMetadata; /** *- * The connector metadata specific to Trend Micro. + * The connector metadata specific to Amazon Redshift. *
*/ - Trendmicro?: TrendmicroMetadata; + Redshift?: RedshiftMetadata; /** *- * The connector metadata specific to Amazon EventBridge. - *
+ * The connector metadata specific to Amazon S3. + * */ - EventBridge?: EventBridgeMetadata; + S3?: S3Metadata; /** *- * The connector metadata specific to Slack. + * The connector metadata specific to Salesforce. *
*/ - Slack?: SlackMetadata; + Salesforce?: SalesforceMetadata; /** *- * The connector metadata specific to Veeva. + * The connector metadata specific to ServiceNow. *
*/ - Veeva?: VeevaMetadata; + ServiceNow?: ServiceNowMetadata; /** *- * The connector metadata specific to Datadog. + * The connector metadata specific to Singular. *
*/ - Datadog?: DatadogMetadata; + Singular?: SingularMetadata; /** *- * The connector metadata specific to Google Analytics. + * The connector metadata specific to Slack. *
*/ - GoogleAnalytics?: GoogleAnalyticsMetadata; + Slack?: SlackMetadata; /** *- * The connector metadata specific to ServiceNow. + * The connector metadata specific to Snowflake. *
*/ - ServiceNow?: ServiceNowMetadata; + Snowflake?: SnowflakeMetadata; /** *- * The connector metadata specific to Amplitude. + * The connector metadata specific to Trend Micro. *
*/ - Amplitude?: AmplitudeMetadata; + Trendmicro?: TrendmicroMetadata; /** *- * The connector metadata specific to Salesforce. + * The connector metadata specific to Veeva. *
*/ - Salesforce?: SalesforceMetadata; + Veeva?: VeevaMetadata; /** *- * The connector metadata specific to Singular. + * The connector metadata specific to Zendesk. *
*/ - Singular?: SingularMetadata; + Zendesk?: ZendeskMetadata; /** *- * The connector metadata specific to Amazon S3. - *
+ * The connector metadata specific to Amazon EventBridge. + * */ - S3?: S3Metadata; + EventBridge?: EventBridgeMetadata; + + /** + *+ * The connector metadata specific to Upsolver. + *
+ */ + Upsolver?: UpsolverMetadata; } export namespace ConnectorMetadata { @@ -534,6 +554,7 @@ export enum ConnectorType { SLACK = "Slack", SNOWFLAKE = "Snowflake", TRENDMICRO = "Trendmicro", + UPSOLVER = "Upsolver", VEEVA = "Veeva", ZENDESK = "Zendesk", } @@ -561,31 +582,31 @@ export enum TriggerType { export interface ConnectorConfiguration { /** *- * Specifies if a PrivateLink endpoint URL is required. + * Specifies whether the connector can be used as a source. *
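
Consumers of the generated model can now treat Upsolver like any other connector key. A minimal sketch, assuming the usual relative import path for this module (values are illustrative, not part of this diff):

```ts
import { ConnectorMetadata, ConnectorType } from "./models_0";

// Hypothetical fragment of a DescribeConnectors response: the new Upsolver
// member is optional, like every other connector-specific key on this shape.
const metadata: ConnectorMetadata = { Upsolver: {} };

// The enum gained a matching member whose wire value is the string "Upsolver".
const destinationType: ConnectorType = ConnectorType.UPSOLVER;
```
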
*/ - isPrivateLinkEndpointUrlRequired?: boolean; + canUseAsSource?: boolean; /** *- * Specifies the supported flow frequency for that connector. - *
+ * Specifies whether the connector can be used as a destination. + * */ - supportedSchedulingFrequencies?: (ScheduleFrequencyType | string)[]; + canUseAsDestination?: boolean; /** *- * Specifies the supported trigger types for the flow. + * Lists the connectors that are available for use as destinations. *
*/ - supportedTriggerTypes?: (TriggerType | string)[]; + supportedDestinationConnectors?: (ConnectorType | string)[]; /** *- * Specifies whether the connector can be used as a destination. - *
+ * Specifies the supported flow frequency for that connector. + * */ - canUseAsDestination?: boolean; + supportedSchedulingFrequencies?: (ScheduleFrequencyType | string)[]; /** *@@ -596,24 +617,24 @@ export interface ConnectorConfiguration { /** *
- * Specifies whether the connector can be used as a source. + * Specifies if a PrivateLink endpoint URL is required. *
*/ - canUseAsSource?: boolean; + isPrivateLinkEndpointUrlRequired?: boolean; /** *
- * Specifies connector-specific metadata such as oAuthScopes
, supportedRegions
, privateLinkServiceUrl
, and so on.
+ * Specifies the supported trigger types for the flow.
*
- * Lists the connectors that are available for use as destinations.
+ * Specifies connector-specific metadata such as oAuthScopes
, supportedRegions
, privateLinkServiceUrl
, and so on.
*
- * The label applied to the connector entity. + * The name of the connector entity. *
*/ - label?: string; + name: string | undefined; /** *
- * Specifies whether the connector entity is a parent or a category and has more entities nested underneath it. If another call is made with entitiesPath = "the_current_entity_name_with_hasNestedEntities_true"
, then it returns the nested entities underneath it. This provides a way to retrieve all supported entities in a recursive fashion.
+ * The label applied to the connector entity.
*
- * The name of the connector entity.
+ * Specifies whether the connector entity is a parent or a category and has more entities nested underneath it. If another call is made with entitiesPath = "the_current_entity_name_with_hasNestedEntities_true"
, then it returns the nested entities underneath it. This provides a way to retrieve all supported entities in a recursive fashion.
*
+ * Specifies if the destination field can be created by the current user. + *
+ */ + isCreatable?: boolean; + /** ** Specifies if the destination field can have a null value. @@ -678,10 +706,10 @@ export interface DestinationFieldProperties { /** *
- * A list of supported write operations. For each write operation listed, this field can be used in idFieldNames
when that write operation is present as a destination option.
+ * Specifies if the flow run can either insert new rows in the destination field if they do not already exist, or update them if they do.
*
@@ -692,17 +720,10 @@ export interface DestinationFieldProperties { /** *
- * Specifies if the flow run can either insert new rows in the destination field if they do not already exist, or update them if they do.
+ * A list of supported write operations. For each write operation listed, this field can be used in idFieldNames
when that write operation is present as a destination option.
*
- * Specifies if the destination field can be created by the current user. - *
- */ - isCreatable?: boolean; + supportedWriteOperations?: (WriteOperationType | string)[]; } export namespace DestinationFieldProperties { @@ -777,17 +798,17 @@ export interface FieldTypeDetails { /** *
- * The list of values that a field can contain. For example, a Boolean fieldType
can have two values: "true" and "false".
+ * The list of operators supported by a field.
*
- * The list of operators supported by a field.
+ * The list of values that a field can contain. For example, a Boolean fieldType
can have two values: "true" and "false".
*
- * A description of the connector entity field. + * The unique identifier of the connector field. *
*/ - description?: string; + identifier: string | undefined; /** *
- * Contains details regarding the supported FieldType
, including the corresponding filterOperators
and supportedValues
.
+ * The label applied to a connector entity field.
*
- * The label applied to a connector entity field.
+ * Contains details regarding the supported FieldType
, including the corresponding filterOperators
and supportedValues
.
*
- * The properties applied to a field when the connector is being used as a destination. + * A description of the connector entity field. *
*/ - destinationProperties?: DestinationFieldProperties; + description?: string; /** *- * The unique identifier of the connector field. + * The properties that can be applied to a field when the connector is being used as a source. *
*/ - identifier: string | undefined; + sourceProperties?: SourceFieldProperties; /** *- * The properties that can be applied to a field when the connector is being used as a source. + * The properties applied to a field when the connector is being used as a destination. *
*/ - sourceProperties?: SourceFieldProperties; + destinationProperties?: DestinationFieldProperties; } export namespace ConnectorEntityField { @@ -879,17 +900,17 @@ export namespace ConnectorEntityField { export interface ConnectorOAuthRequest { /** *- * The URL to which the authentication server redirects the browser after authorization has been granted. - *
+ * The code provided by the connector when it has been authenticated via the connected app. + * */ - redirectUri?: string; + authCode?: string; /** *- * The code provided by the connector when it has been authenticated via the connected app. - *
+ * The URL to which the authentication server redirects the browser after authorization has been granted. + * */ - authCode?: string; + redirectUri?: string; } export namespace ConnectorOAuthRequest { @@ -1152,101 +1173,101 @@ export enum ZendeskConnectorOperator { export interface ConnectorOperator { /** *- * The operation to be performed on the provided Zendesk source fields. - *
+ * The operation to be performed on the provided Amplitude source fields. + * */ - Zendesk?: ZendeskConnectorOperator | string; + Amplitude?: AmplitudeConnectorOperator | string; /** *- * The operation to be performed on the provided Marketo source fields. + * The operation to be performed on the provided Datadog source fields. *
*/ - Marketo?: MarketoConnectorOperator | string; + Datadog?: DatadogConnectorOperator | string; /** *- * The operation to be performed on the provided Datadog source fields. + * The operation to be performed on the provided Dynatrace source fields. *
*/ - Datadog?: DatadogConnectorOperator | string; + Dynatrace?: DynatraceConnectorOperator | string; /** *- * The operation to be performed on the provided Veeva source fields. + * The operation to be performed on the provided Google Analytics source fields. *
*/ - Veeva?: VeevaConnectorOperator | string; + GoogleAnalytics?: GoogleAnalyticsConnectorOperator | string; /** *- * The operation to be performed on the provided Dynatrace source fields. + * The operation to be performed on the provided Infor Nexus source fields. *
*/ - Dynatrace?: DynatraceConnectorOperator | string; + InforNexus?: InforNexusConnectorOperator | string; /** *- * The operation to be performed on the provided ServiceNow source fields. + * The operation to be performed on the provided Marketo source fields. *
*/ - ServiceNow?: ServiceNowConnectorOperator | string; + Marketo?: MarketoConnectorOperator | string; /** *- * The operation to be performed on the provided Salesforce source fields. + * The operation to be performed on the provided Amazon S3 source fields. *
*/ - Salesforce?: SalesforceConnectorOperator | string; + S3?: S3ConnectorOperator | string; /** *- * The operation to be performed on the provided Amplitude source fields. - *
+ * The operation to be performed on the provided Salesforce source fields. + * */ - Amplitude?: AmplitudeConnectorOperator | string; + Salesforce?: SalesforceConnectorOperator | string; /** *- * The operation to be performed on the provided Singular source fields. + * The operation to be performed on the provided ServiceNow source fields. *
*/ - Singular?: SingularConnectorOperator | string; + ServiceNow?: ServiceNowConnectorOperator | string; /** *- * The operation to be performed on the provided Amazon S3 source fields. + * The operation to be performed on the provided Singular source fields. *
*/ - S3?: S3ConnectorOperator | string; + Singular?: SingularConnectorOperator | string; /** *- * The operation to be performed on the provided Infor Nexus source fields. + * The operation to be performed on the provided Slack source fields. *
*/ - InforNexus?: InforNexusConnectorOperator | string; + Slack?: SlackConnectorOperator | string; /** *- * The operation to be performed on the provided Google Analytics source fields. + * The operation to be performed on the provided Trend Micro source fields. *
*/ - GoogleAnalytics?: GoogleAnalyticsConnectorOperator | string; + Trendmicro?: TrendmicroConnectorOperator | string; /** *- * The operation to be performed on the provided Slack source fields. + * The operation to be performed on the provided Veeva source fields. *
*/ - Slack?: SlackConnectorOperator | string; + Veeva?: VeevaConnectorOperator | string; /** *- * The operation to be performed on the provided Trend Micro source fields. + * The operation to be performed on the provided Zendesk source fields. *
*/ - Trendmicro?: TrendmicroConnectorOperator | string; + Zendesk?: ZendeskConnectorOperator | string; } export namespace ConnectorOperator { @@ -1356,31 +1377,31 @@ export namespace MarketoConnectorProfileProperties { export interface RedshiftConnectorProfileProperties { /** *- * The Amazon Resource Name (ARN) of the IAM role. + * The JDBC URL of the Amazon Redshift cluster. *
*/ - roleArn: string | undefined; + databaseUrl: string | undefined; /** *- * The object key for the destination bucket in which Amazon AppFlow places the files. + * A name for the associated Amazon S3 bucket. *
*/ - bucketPrefix?: string; + bucketName: string | undefined; /** *- * A name for the associated Amazon S3 bucket. + * The object key for the destination bucket in which Amazon AppFlow places the files. *
*/ - bucketName: string | undefined; + bucketPrefix?: string; /** *- * The JDBC URL of the Amazon Redshift cluster. + * The Amazon Resource Name (ARN) of the IAM role. *
*/ - databaseUrl: string | undefined; + roleArn: string | undefined; } export namespace RedshiftConnectorProfileProperties { @@ -1397,17 +1418,17 @@ export namespace RedshiftConnectorProfileProperties { export interface SalesforceConnectorProfileProperties { /** *- * Indicates whether the connector profile applies to a sandbox or production environment. + * The location of the Salesforce resource. *
*/ - isSandboxEnvironment?: boolean; + instanceUrl?: string; /** *- * The location of the Salesforce resource. + * Indicates whether the connector profile applies to a sandbox or production environment. *
*/ - instanceUrl?: string; + isSandboxEnvironment?: boolean; } export namespace SalesforceConnectorProfileProperties { @@ -1477,31 +1498,31 @@ export namespace SlackConnectorProfileProperties { export interface SnowflakeConnectorProfileProperties { /** *- * The name of the account. + * The name of the Snowflake warehouse. *
*/ - accountName?: string; + warehouse: string | undefined; /** *
- * The bucket path that refers to the Amazon S3 bucket associated with Snowflake.
+ * The name of the Amazon S3 stage that was created while setting up an Amazon S3 stage in the Snowflake account. This is written in the following format: < Database>< Schema>
- * The name of the Amazon S3 stage that was created while setting up an Amazon S3 stage in the Snowflake account. This is written in the following format: < Database>< Schema>
- * The name of the Amazon S3 bucket associated with Snowflake. + * The bucket path that refers to the Amazon S3 bucket associated with Snowflake. *
*/ - bucketName: string | undefined; + bucketPrefix?: string; /** *@@ -1512,17 +1533,17 @@ export interface SnowflakeConnectorProfileProperties { /** *
- * The AWS Region of the Snowflake account. + * The name of the account. *
*/ - region?: string; + accountName?: string; /** *- * The name of the Snowflake warehouse. + * The AWS Region of the Snowflake account. *
*/ - warehouse: string | undefined; + region?: string; } export namespace SnowflakeConnectorProfileProperties { @@ -1592,108 +1613,108 @@ export namespace ZendeskConnectorProfileProperties { export interface ConnectorProfileProperties { /** *- * The connector-specific properties required by Slack. + * The connector-specific properties required by Amplitude. *
*/ - Slack?: SlackConnectorProfileProperties; + Amplitude?: AmplitudeConnectorProfileProperties; /** *- * The connector-specific properties required by Snowflake. + * The connector-specific properties required by Datadog. *
*/ - Snowflake?: SnowflakeConnectorProfileProperties; + Datadog?: DatadogConnectorProfileProperties; /** *- * The connector-specific properties required by Veeva. + * The connector-specific properties required by Dynatrace. *
*/ - Veeva?: VeevaConnectorProfileProperties; + Dynatrace?: DynatraceConnectorProfileProperties; /** *- * The connector-specific properties required by Marketo. + * The connector-specific properties required by Google Analytics. *
*/ - Marketo?: MarketoConnectorProfileProperties; + GoogleAnalytics?: GoogleAnalyticsConnectorProfileProperties; /** *- * The connector-specific properties required by Datadog. + * The connector-specific properties required by Infor Nexus. *
*/ - Datadog?: DatadogConnectorProfileProperties; + InforNexus?: InforNexusConnectorProfileProperties; /** *- * The connector-specific properties required by Amazon Redshift. + * The connector-specific properties required by Marketo. *
*/ - Redshift?: RedshiftConnectorProfileProperties; + Marketo?: MarketoConnectorProfileProperties; /** *- * The connector-specific properties required by Trend Micro. + * The connector-specific properties required by Amazon Redshift. *
*/ - Trendmicro?: TrendmicroConnectorProfileProperties; + Redshift?: RedshiftConnectorProfileProperties; /** *- * The connector-specific properties required by serviceNow. + * The connector-specific properties required by Salesforce. *
*/ - ServiceNow?: ServiceNowConnectorProfileProperties; + Salesforce?: SalesforceConnectorProfileProperties; /** *- * The connector-specific properties required by Amplitude. + * The connector-specific properties required by ServiceNow. *
*/ - Amplitude?: AmplitudeConnectorProfileProperties; + ServiceNow?: ServiceNowConnectorProfileProperties; /** *- * The connector-specific properties required Google Analytics. + * The connector-specific properties required by Singular. *
*/ - GoogleAnalytics?: GoogleAnalyticsConnectorProfileProperties; + Singular?: SingularConnectorProfileProperties; /** *- * The connector-specific properties required by Infor Nexus. + * The connector-specific properties required by Slack. *
*/ - InforNexus?: InforNexusConnectorProfileProperties; + Slack?: SlackConnectorProfileProperties; /** *- * The connector-specific properties required by Zendesk. + * The connector-specific properties required by Snowflake. *
*/ - Zendesk?: ZendeskConnectorProfileProperties; + Snowflake?: SnowflakeConnectorProfileProperties; /** *- * The connector-specific properties required by Singular. + * The connector-specific properties required by Trend Micro. *
*/ - Singular?: SingularConnectorProfileProperties; + Trendmicro?: TrendmicroConnectorProfileProperties; /** *- * The connector-specific properties required by Dynatrace. + * The connector-specific properties required by Veeva. *
*/ - Dynatrace?: DynatraceConnectorProfileProperties; + Veeva?: VeevaConnectorProfileProperties; /** *- * The connector-specific properties required by Salesforce. + * The connector-specific properties required by Zendesk. *
*/ - Salesforce?: SalesforceConnectorProfileProperties; + Zendesk?: ZendeskConnectorProfileProperties; } export namespace ConnectorProfileProperties { @@ -1710,17 +1731,17 @@ export namespace ConnectorProfileProperties { export interface ConnectorProfile { /** *- * The connector-specific properties of the profile configuration. + * The Amazon Resource Name (ARN) of the connector profile. *
*/ - connectorProfileProperties?: ConnectorProfileProperties; + connectorProfileArn?: string; /** *
- * The Amazon Resource Name (ARN) of the connector profile.
+ * The name of the connector profile. The name is unique for each ConnectorProfile
in the AWS account.
*
@@ -1731,24 +1752,24 @@ export interface ConnectorProfile { /** *
- * The Amazon Resource Name (ARN) of the connector profile credentials. + * Indicates the connection mode and if it is public or private. *
*/ - credentialsArn?: string; + connectionMode?: ConnectionMode | string; /** *
- * The name of the connector profile. The name is unique for each ConnectorProfile
in the AWS account.
+ * The Amazon Resource Name (ARN) of the connector profile credentials.
*
- * Indicates the connection mode and if it is public or private. + * The connector-specific properties of the profile configuration. *
*/ - connectionMode?: ConnectionMode | string; + connectorProfileProperties?: ConnectorProfileProperties; /** *@@ -1833,38 +1854,38 @@ export interface GoogleAnalyticsConnectorProfileCredentials { /** *
- * The credentials used to access protected Google Analytics resources. + * The client secret used by the OAuth client to authenticate to the authorization server. *
*/ - accessToken?: string; + clientSecret: string | undefined; /** *- * The OAuth requirement needed to request security tokens from the connector endpoint. + * The credentials used to access protected Google Analytics resources. *
*/ - oAuthRequest?: ConnectorOAuthRequest; + accessToken?: string; /** *- * The client secret used by the OAuth client to authenticate to the authorization server. + * The credentials used to acquire new access tokens. This is required only for OAuth2 access tokens, and is not required for OAuth1 access tokens. *
*/ - clientSecret: string | undefined; + refreshToken?: string; /** *- * The credentials used to acquire new access tokens. This is required only for OAuth2 access tokens, and is not required for OAuth1 access tokens. + * The OAuth requirement needed to request security tokens from the connector endpoint. *
*/ - refreshToken?: string; + oAuthRequest?: ConnectorOAuthRequest; } export namespace GoogleAnalyticsConnectorProfileCredentials { export const filterSensitiveLog = (obj: GoogleAnalyticsConnectorProfileCredentials): any => ({ ...obj, - ...(obj.accessToken && { accessToken: SENSITIVE_STRING }), ...(obj.clientSecret && { clientSecret: SENSITIVE_STRING }), + ...(obj.accessToken && { accessToken: SENSITIVE_STRING }), }); } @@ -1876,31 +1897,31 @@ export namespace GoogleAnalyticsConnectorProfileCredentials { export interface InforNexusConnectorProfileCredentials { /** *- * The identifier for the user. + * The Access Key portion of the credentials. *
*/ - userId: string | undefined; + accessKeyId: string | undefined; /** *- * The Access Key portion of the credentials. + * The identifier for the user. *
*/ - accessKeyId: string | undefined; + userId: string | undefined; /** *- * The encryption keys used to encrypt data. + * The secret key used to sign requests. *
*/ - datakey: string | undefined; + secretAccessKey: string | undefined; /** *- * The secret key used to sign requests. + * The encryption keys used to encrypt data. *
*/ - secretAccessKey: string | undefined; + datakey: string | undefined; } export namespace InforNexusConnectorProfileCredentials { @@ -1918,24 +1939,24 @@ export namespace InforNexusConnectorProfileCredentials { export interface MarketoConnectorProfileCredentials { /** *- * The client secret used by the OAuth client to authenticate to the authorization server. + * The identifier for the desired client. *
*/ - clientSecret: string | undefined; + clientId: string | undefined; /** *- * The credentials used to access protected Marketo resources. + * The client secret used by the OAuth client to authenticate to the authorization server. *
*/ - accessToken?: string; + clientSecret: string | undefined; /** *- * The identifier for the desired client. + * The credentials used to access protected Marketo resources. *
*/ - clientId: string | undefined; + accessToken?: string; /** *@@ -1989,38 +2010,38 @@ export namespace RedshiftConnectorProfileCredentials { export interface SalesforceConnectorProfileCredentials { /** *
- * The credentials used to acquire new access tokens. + * The credentials used to access protected Salesforce resources. *
*/ - refreshToken?: string; + accessToken?: string; /** *- * The secret manager ARN, which contains the client ID and client secret of the connected app. - *
+ * The credentials used to acquire new access tokens. + * */ - clientCredentialsArn?: string; + refreshToken?: string; /** *- * The credentials used to access protected Salesforce resources. + * The OAuth requirement needed to request security tokens from the connector endpoint. *
*/ - accessToken?: string; + oAuthRequest?: ConnectorOAuthRequest; /** *- * The OAuth requirement needed to request security tokens from the connector endpoint. - *
+ * The secret manager ARN, which contains the client ID and client secret of the connected app. + * */ - oAuthRequest?: ConnectorOAuthRequest; + clientCredentialsArn?: string; } export namespace SalesforceConnectorProfileCredentials { export const filterSensitiveLog = (obj: SalesforceConnectorProfileCredentials): any => ({ ...obj, - ...(obj.clientCredentialsArn && { clientCredentialsArn: SENSITIVE_STRING }), ...(obj.accessToken && { accessToken: SENSITIVE_STRING }), + ...(obj.clientCredentialsArn && { clientCredentialsArn: SENSITIVE_STRING }), }); } @@ -2086,17 +2107,17 @@ export interface SlackConnectorProfileCredentials { /** *- * The credentials used to access protected Slack resources. + * The client secret used by the OAuth client to authenticate to the authorization server. *
*/ - accessToken?: string; + clientSecret: string | undefined; /** *- * The client secret used by the OAuth client to authenticate to the authorization server. + * The credentials used to access protected Slack resources. *
*/ - clientSecret: string | undefined; + accessToken?: string; /** *@@ -2109,8 +2130,8 @@ export interface SlackConnectorProfileCredentials { export namespace SlackConnectorProfileCredentials { export const filterSensitiveLog = (obj: SlackConnectorProfileCredentials): any => ({ ...obj, - ...(obj.accessToken && { accessToken: SENSITIVE_STRING }), ...(obj.clientSecret && { clientSecret: SENSITIVE_STRING }), + ...(obj.accessToken && { accessToken: SENSITIVE_STRING }), }); } @@ -2122,17 +2143,17 @@ export namespace SlackConnectorProfileCredentials { export interface SnowflakeConnectorProfileCredentials { /** *
- * The password that corresponds to the user name. + * The name of the user. *
*/ - password: string | undefined; + username: string | undefined; /** *- * The name of the user. + * The password that corresponds to the user name. *
*/ - username: string | undefined; + password: string | undefined; } export namespace SnowflakeConnectorProfileCredentials { @@ -2240,20 +2261,6 @@ export namespace ZendeskConnectorProfileCredentials { * */ export interface ConnectorProfileCredentials { - /** - *- * The connector-specific credentials required when using ServiceNow. - *
- */ - ServiceNow?: ServiceNowConnectorProfileCredentials; - - /** - *- * The connector-specific credentials required when using Salesforce. - *
- */ - Salesforce?: SalesforceConnectorProfileCredentials; - /** ** The connector-specific credentials required when using Amplitude. @@ -2263,10 +2270,10 @@ export interface ConnectorProfileCredentials { /** *
- * The connector-specific credentials required when using Trend Micro. + * The connector-specific credentials required when using Datadog. *
*/ - Trendmicro?: TrendmicroConnectorProfileCredentials; + Datadog?: DatadogConnectorProfileCredentials; /** *@@ -2275,13 +2282,6 @@ export interface ConnectorProfileCredentials { */ Dynatrace?: DynatraceConnectorProfileCredentials; - /** - *
- * The connector-specific credentials required when using Zendesk. - *
- */ - Zendesk?: ZendeskConnectorProfileCredentials; - /** ** The connector-specific credentials required when using Google Analytics. @@ -2291,17 +2291,17 @@ export interface ConnectorProfileCredentials { /** *
- * The connector-specific credentials required when using Singular. + * The connector-specific credentials required when using Infor Nexus. *
*/ - Singular?: SingularConnectorProfileCredentials; + InforNexus?: InforNexusConnectorProfileCredentials; /** *- * The connector-specific credentials required when using Snowflake. + * The connector-specific credentials required when using Marketo. *
*/ - Snowflake?: SnowflakeConnectorProfileCredentials; + Marketo?: MarketoConnectorProfileCredentials; /** *@@ -2312,31 +2312,45 @@ export interface ConnectorProfileCredentials { /** *
- * The connector-specific credentials required when using Infor Nexus. + * The connector-specific credentials required when using Salesforce. *
*/ - InforNexus?: InforNexusConnectorProfileCredentials; + Salesforce?: SalesforceConnectorProfileCredentials; /** *- * The connector-specific credentials required when using Slack. + * The connector-specific credentials required when using ServiceNow. *
*/ - Slack?: SlackConnectorProfileCredentials; + ServiceNow?: ServiceNowConnectorProfileCredentials; /** *- * The connector-specific credentials required when using Marketo. + * The connector-specific credentials required when using Singular. *
*/ - Marketo?: MarketoConnectorProfileCredentials; + Singular?: SingularConnectorProfileCredentials; /** *- * The connector-specific credentials required when using Datadog. + * The connector-specific credentials required when using Slack. *
*/ - Datadog?: DatadogConnectorProfileCredentials; + Slack?: SlackConnectorProfileCredentials; + + /** + *+ * The connector-specific credentials required when using Snowflake. + *
+ */ + Snowflake?: SnowflakeConnectorProfileCredentials; + + /** + *+ * The connector-specific credentials required when using Trend Micro. + *
+ */ + Trendmicro?: TrendmicroConnectorProfileCredentials; /** *@@ -2344,25 +2358,32 @@ export interface ConnectorProfileCredentials { *
*/ Veeva?: VeevaConnectorProfileCredentials; + + /** + *+ * The connector-specific credentials required when using Zendesk. + *
+ */ + Zendesk?: ZendeskConnectorProfileCredentials; } export namespace ConnectorProfileCredentials { export const filterSensitiveLog = (obj: ConnectorProfileCredentials): any => ({ ...obj, - ...(obj.ServiceNow && { ServiceNow: ServiceNowConnectorProfileCredentials.filterSensitiveLog(obj.ServiceNow) }), - ...(obj.Salesforce && { Salesforce: SalesforceConnectorProfileCredentials.filterSensitiveLog(obj.Salesforce) }), ...(obj.Amplitude && { Amplitude: AmplitudeConnectorProfileCredentials.filterSensitiveLog(obj.Amplitude) }), - ...(obj.Trendmicro && { Trendmicro: TrendmicroConnectorProfileCredentials.filterSensitiveLog(obj.Trendmicro) }), - ...(obj.Zendesk && { Zendesk: ZendeskConnectorProfileCredentials.filterSensitiveLog(obj.Zendesk) }), ...(obj.GoogleAnalytics && { GoogleAnalytics: GoogleAnalyticsConnectorProfileCredentials.filterSensitiveLog(obj.GoogleAnalytics), }), - ...(obj.Snowflake && { Snowflake: SnowflakeConnectorProfileCredentials.filterSensitiveLog(obj.Snowflake) }), - ...(obj.Redshift && { Redshift: RedshiftConnectorProfileCredentials.filterSensitiveLog(obj.Redshift) }), ...(obj.InforNexus && { InforNexus: InforNexusConnectorProfileCredentials.filterSensitiveLog(obj.InforNexus) }), - ...(obj.Slack && { Slack: SlackConnectorProfileCredentials.filterSensitiveLog(obj.Slack) }), ...(obj.Marketo && { Marketo: MarketoConnectorProfileCredentials.filterSensitiveLog(obj.Marketo) }), + ...(obj.Redshift && { Redshift: RedshiftConnectorProfileCredentials.filterSensitiveLog(obj.Redshift) }), + ...(obj.Salesforce && { Salesforce: SalesforceConnectorProfileCredentials.filterSensitiveLog(obj.Salesforce) }), + ...(obj.ServiceNow && { ServiceNow: ServiceNowConnectorProfileCredentials.filterSensitiveLog(obj.ServiceNow) }), + ...(obj.Slack && { Slack: SlackConnectorProfileCredentials.filterSensitiveLog(obj.Slack) }), + ...(obj.Snowflake && { Snowflake: SnowflakeConnectorProfileCredentials.filterSensitiveLog(obj.Snowflake) }), + ...(obj.Trendmicro && { Trendmicro: TrendmicroConnectorProfileCredentials.filterSensitiveLog(obj.Trendmicro) }), ...(obj.Veeva && { Veeva: VeevaConnectorProfileCredentials.filterSensitiveLog(obj.Veeva) }), + ...(obj.Zendesk && { Zendesk: ZendeskConnectorProfileCredentials.filterSensitiveLog(obj.Zendesk) }), }); } @@ -2423,31 +2444,31 @@ export interface CreateConnectorProfileRequest { /** *- * The type of connector, such as Salesforce, Amplitude, and so on. + * The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key. *
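
The reordered spreads above do not change behavior: `filterSensitiveLog` still redacts only the members each credential shape marks as sensitive. A small usage sketch (token values are made up):

```ts
import { ConnectorProfileCredentials } from "./models_0";

// Hypothetical credentials object that is about to be logged.
const credentials: ConnectorProfileCredentials = {
  Salesforce: {
    accessToken: "example-access-token",
    refreshToken: "example-refresh-token",
  },
};

// accessToken (and clientCredentialsArn, when present) are replaced with the
// SDK's sensitive-string placeholder; refreshToken passes through unchanged.
console.log(ConnectorProfileCredentials.filterSensitiveLog(credentials));
```
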
*/ - connectorType: ConnectorType | string | undefined; + kmsArn?: string; /** *- * Defines the connector-specific configuration and credentials. + * The type of connector, such as Salesforce, Amplitude, and so on. *
*/ - connectorProfileConfig: ConnectorProfileConfig | undefined; + connectorType: ConnectorType | string | undefined; /** *- * The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key. + * Indicates the connection mode and specifies whether it is public or private. Private flows use AWS PrivateLink to route data over AWS infrastructure without exposing it to the public internet. *
*/ - kmsArn?: string; + connectionMode: ConnectionMode | string | undefined; /** *- * Indicates the connection mode and specifies whether it is public or private. Private flows use AWS PrivateLink to route data over AWS infrastructure without exposing it to the public internet. + * Defines the connector-specific configuration and credentials. *
*/ - connectionMode: ConnectionMode | string | undefined; + connectorProfileConfig: ConnectorProfileConfig | undefined; } export namespace CreateConnectorProfileRequest { @@ -2565,18 +2586,18 @@ export namespace ErrorHandlingConfig { export interface EventBridgeDestinationProperties { /** *
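
For reference, a hedged sketch of how the reordered CreateConnectorProfileRequest fits together; the profile name and the nested ConnectorProfileConfig field names come from the service model rather than this hunk, and all values are illustrative:

```ts
import { CreateConnectorProfileRequest } from "./models_0";

// Hypothetical public Salesforce profile.
const profileRequest: CreateConnectorProfileRequest = {
  connectorProfileName: "my-salesforce-profile",
  connectorType: "Salesforce",
  connectionMode: "Public",
  connectorProfileConfig: {
    connectorProfileProperties: { Salesforce: { isSandboxEnvironment: false } },
    connectorProfileCredentials: { Salesforce: { accessToken: "example-token" } },
  },
};
```
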
- * The settings that determine how Amazon AppFlow handles an error when placing data in the destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
- *
- *
- * The object specified in the Amazon EventBridge flow destination. - *
+ * The settings that determine how Amazon AppFlow handles an error when placing data in the destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure.ErrorHandlingConfig
is a part of the destination connector details.
+ *
+ *
*/
- object: string | undefined;
+ errorHandlingConfig?: ErrorHandlingConfig;
}
export namespace EventBridgeDestinationProperties {
@@ -2593,31 +2614,31 @@ export namespace EventBridgeDestinationProperties {
export interface RedshiftDestinationProperties {
/**
* - * The object key for the bucket in which Amazon AppFlow places the destination files. + * The object specified in the Amazon Redshift flow destination. *
*/ - bucketPrefix?: string; + object: string | undefined; /** *- * The object specified in the Amazon Redshift flow destination. + * The intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift. *
*/ - object: string | undefined; + intermediateBucketName: string | undefined; /** *
- * The settings that determine how Amazon AppFlow handles an error when placing data in the Amazon Redshift destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
+ * The object key for the bucket in which Amazon AppFlow places the destination files.
*
- * The intermediate bucket that Amazon AppFlow uses when moving data into Amazon Redshift.
+ * The settings that determine how Amazon AppFlow handles an error when placing data in the Amazon Redshift destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
*
+ * Indicates the file type that Amazon AppFlow places in the Amazon S3 bucket. + *
+ */ + fileType?: FileType | string; + /** ** Determines the prefix that Amazon AppFlow applies to the folder name in the Amazon S3 bucket. You can name folders according to the flow frequency and date. @@ -2692,13 +2720,6 @@ export interface S3OutputFormatConfig { *
*/ aggregationConfig?: AggregationConfig; - - /** - *- * Indicates the file type that Amazon AppFlow places in the Amazon S3 bucket. - *
- */ - fileType?: FileType | string; } export namespace S3OutputFormatConfig { @@ -2715,17 +2736,17 @@ export namespace S3OutputFormatConfig { export interface S3DestinationProperties { /** *- * The object key for the destination bucket in which Amazon AppFlow places the files. + * The Amazon S3 bucket name in which Amazon AppFlow places the transferred data. *
*/ - bucketPrefix?: string; + bucketName: string | undefined; /** *- * The Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + * The object key for the destination bucket in which Amazon AppFlow places the files. *
*/ - bucketName: string | undefined; + bucketPrefix?: string; /** *@@ -2749,10 +2770,10 @@ export namespace S3DestinationProperties { export interface SalesforceDestinationProperties { /** *
- * The settings that determine how Amazon AppFlow handles an error when placing data in the Salesforce destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
+ * The object specified in the Salesforce flow destination.
*
@@ -2763,10 +2784,10 @@ export interface SalesforceDestinationProperties { /** *
- * The object specified in the Salesforce flow destination.
+ * The settings that determine how Amazon AppFlow handles an error when placing data in the Salesforce destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
*
@@ -2790,31 +2811,31 @@ export namespace SalesforceDestinationProperties { export interface SnowflakeDestinationProperties { /** *
- * The object key for the destination bucket in which Amazon AppFlow places the files. + * The object specified in the Snowflake flow destination. *
*/ - bucketPrefix?: string; + object: string | undefined; /** *
- * The settings that determine how Amazon AppFlow handles an error when placing data in the Snowflake destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
+ * The intermediate bucket that Amazon AppFlow uses when moving data into Snowflake.
*
- * The object specified in the Snowflake flow destination. + * The object key for the destination bucket in which Amazon AppFlow places the files. *
*/ - object: string | undefined; + bucketPrefix?: string; /** *
- * The intermediate bucket that Amazon AppFlow uses when moving data into Snowflake.
+ * The settings that determine how Amazon AppFlow handles an error when placing data in the Snowflake destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig
is a part of the destination connector details.
*
- * This stores the information that is required to query a particular connector. - *
+ * The configuration that determines how Amazon AppFlow formats the flow output data when Upsolver is used as the destination. + * */ -export interface DestinationConnectorProperties { +export interface UpsolverS3OutputFormatConfig { /** *- * The properties required to query Amazon EventBridge. + * Indicates the file type that Amazon AppFlow places in the Upsolver Amazon S3 bucket. *
*/ - EventBridge?: EventBridgeDestinationProperties; + fileType?: FileType | string; /** *- * The properties required to query Snowflake. + * Determines the prefix that Amazon AppFlow applies to the destination folder name. You can name your destination folders according to the flow frequency and date. + *
+ */ + prefixConfig: PrefixConfig | undefined; + + /** + *+ * The aggregation settings that you can use to customize the output format of your flow data. *
*/ - Snowflake?: SnowflakeDestinationProperties; + aggregationConfig?: AggregationConfig; +} + +export namespace UpsolverS3OutputFormatConfig { + export const filterSensitiveLog = (obj: UpsolverS3OutputFormatConfig): any => ({ + ...obj, + }); +} + +/** + *+ * The properties that are applied when Upsolver is used as a destination. + *
+ */ +export interface UpsolverDestinationProperties { + /** + *+ * The Upsolver Amazon S3 bucket name in which Amazon AppFlow places the transferred data. + *
+ */ + bucketName: string | undefined; + + /** + *+ * The object key for the destination Upsolver Amazon S3 bucket in which Amazon AppFlow places the files. + *
+ */ + bucketPrefix?: string; + + /** + *+ * The configuration that determines how data is formatted when Upsolver is used as the flow destination. + *
+ */ + s3OutputFormatConfig: UpsolverS3OutputFormatConfig | undefined; +} +export namespace UpsolverDestinationProperties { + export const filterSensitiveLog = (obj: UpsolverDestinationProperties): any => ({ + ...obj, + }); +} + +/** + *+ * This stores the information that is required to query a particular connector. + *
+ */ +export interface DestinationConnectorProperties { /** ** The properties required to query Amazon Redshift. @@ -2850,6 +2925,13 @@ export interface DestinationConnectorProperties { */ Redshift?: RedshiftDestinationProperties; + /** + *
+ * The properties required to query Amazon S3. + *
+ */ + S3?: S3DestinationProperties; + /** ** The properties required to query Salesforce. @@ -2859,10 +2941,24 @@ export interface DestinationConnectorProperties { /** *
- * The properties required to query Amazon S3. + * The properties required to query Snowflake. *
*/ - S3?: S3DestinationProperties; + Snowflake?: SnowflakeDestinationProperties; + + /** + *+ * The properties required to query Amazon EventBridge. + *
+ */ + EventBridge?: EventBridgeDestinationProperties; + + /** + *+ * The properties required to query Upsolver. + *
+ */ + Upsolver?: UpsolverDestinationProperties; } export namespace DestinationConnectorProperties { @@ -2879,10 +2975,10 @@ export namespace DestinationConnectorProperties { export interface DestinationFlowConfig { /** *- * This stores the information that is required to query a particular connector. + * The type of connector, such as Salesforce, Amplitude, and so on. *
*/ - destinationConnectorProperties: DestinationConnectorProperties | undefined; + connectorType: ConnectorType | string | undefined; /** *@@ -2893,10 +2989,10 @@ export interface DestinationFlowConfig { /** *
- * The type of connector, such as Salesforce, Amplitude, and so on. + * This stores the information that is required to query a particular connector. *
*/ - connectorType: ConnectorType | string | undefined; + destinationConnectorProperties: DestinationConnectorProperties | undefined; } export namespace DestinationFlowConfig { @@ -3067,17 +3163,17 @@ export interface SalesforceSourceProperties { /** *- * Indicates whether Amazon AppFlow includes deleted files in the flow run. - *
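
Together with the new shapes above, DestinationFlowConfig can now point a flow at Upsolver. A minimal sketch, assuming the usual relative import path; bucket names, prefixes, and the file type are illustrative:

```ts
import { ConnectorType, DestinationFlowConfig } from "./models_0";

const upsolverDestination: DestinationFlowConfig = {
  connectorType: ConnectorType.UPSOLVER,
  destinationConnectorProperties: {
    Upsolver: {
      bucketName: "upsolver-appflow-output",   // hypothetical bucket
      bucketPrefix: "appflow/raw",             // optional object key prefix
      s3OutputFormatConfig: {
        fileType: "JSON",                      // a FileType value
        prefixConfig: {},                      // prefix settings omitted in this sketch
      },
    },
  },
};
```
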
+ * The flag that enables dynamic fetching of new (recently added) fields in the Salesforce objects while running a flow. + * */ - includeDeletedRecords?: boolean; + enableDynamicFieldUpdate?: boolean; /** *- * The flag that enables dynamic fetching of new (recently added) fields in the Salesforce objects while running a flow. - *
+ * Indicates whether Amazon AppFlow includes deleted files in the flow run. + * */ - enableDynamicFieldUpdate?: boolean; + includeDeletedRecords?: boolean; } export namespace SalesforceSourceProperties { @@ -3214,101 +3310,101 @@ export namespace ZendeskSourceProperties { export interface SourceConnectorProperties { /** *- * Specifies the information that is required for querying Amazon S3. + * Specifies the information that is required for querying Amplitude. *
*/ - S3?: S3SourceProperties; + Amplitude?: AmplitudeSourceProperties; /** *- * Specifies the information that is required for querying Amplitude. + * Specifies the information that is required for querying Datadog. *
*/ - Amplitude?: AmplitudeSourceProperties; + Datadog?: DatadogSourceProperties; /** *- * Specifies the information that is required for querying ServiceNow. + * Specifies the information that is required for querying Dynatrace. *
*/ - ServiceNow?: ServiceNowSourceProperties; + Dynatrace?: DynatraceSourceProperties; /** *- * Specifies the information that is required for querying Zendesk. + * Specifies the information that is required for querying Google Analytics. *
*/ - Zendesk?: ZendeskSourceProperties; + GoogleAnalytics?: GoogleAnalyticsSourceProperties; /** *- * Specifies the information that is required for querying Veeva. + * Specifies the information that is required for querying Infor Nexus. *
*/ - Veeva?: VeevaSourceProperties; + InforNexus?: InforNexusSourceProperties; /** *- * Specifies the information that is required for querying Dynatrace. + * Specifies the information that is required for querying Marketo. *
*/ - Dynatrace?: DynatraceSourceProperties; + Marketo?: MarketoSourceProperties; /** *- * Specifies the information that is required for querying Trend Micro. + * Specifies the information that is required for querying Amazon S3. *
*/ - Trendmicro?: TrendmicroSourceProperties; + S3?: S3SourceProperties; /** *- * Specifies the information that is required for querying Infor Nexus. + * Specifies the information that is required for querying Salesforce. *
*/ - InforNexus?: InforNexusSourceProperties; + Salesforce?: SalesforceSourceProperties; /** *- * Specifies the information that is required for querying Marketo. + * Specifies the information that is required for querying ServiceNow. *
*/ - Marketo?: MarketoSourceProperties; + ServiceNow?: ServiceNowSourceProperties; /** *- * Specifies the information that is required for querying Datadog. + * Specifies the information that is required for querying Singular. *
*/ - Datadog?: DatadogSourceProperties; + Singular?: SingularSourceProperties; /** *- * Specifies the information that is required for querying Salesforce. + * Specifies the information that is required for querying Slack. *
*/ - Salesforce?: SalesforceSourceProperties; + Slack?: SlackSourceProperties; /** *- * Specifies the information that is required for querying Singular. + * Specifies the information that is required for querying Trend Micro. *
*/ - Singular?: SingularSourceProperties; + Trendmicro?: TrendmicroSourceProperties; /** *- * Specifies the information that is required for querying Slack. + * Specifies the information that is required for querying Veeva. *
*/ - Slack?: SlackSourceProperties; + Veeva?: VeevaSourceProperties; /** *- * Specifies the information that is required for querying Google Analytics. + * Specifies the information that is required for querying Zendesk. *
*/ - GoogleAnalytics?: GoogleAnalyticsSourceProperties; + Zendesk?: ZendeskSourceProperties; } export namespace SourceConnectorProperties { @@ -3339,17 +3435,17 @@ export interface SourceFlowConfig { /** *- * Defines the configuration for a scheduled incremental data pull. If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull. - *
+ * Specifies the information that is required to query a particular source connector. + * */ - incrementalPullConfig?: IncrementalPullConfig; + sourceConnectorProperties: SourceConnectorProperties | undefined; /** *- * Specifies the information that is required to query a particular source connector. - *
+ * Defines the configuration for a scheduled incremental data pull. If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull. + * */ - sourceConnectorProperties: SourceConnectorProperties | undefined; + incrementalPullConfig?: IncrementalPullConfig; } export namespace SourceFlowConfig { @@ -3393,17 +3489,17 @@ export enum TaskType { export interface Task { /** *- * Specifies the particular task implementation that Amazon AppFlow performs. + * The source fields to which a particular task is applied. *
*/ - taskType: TaskType | string | undefined; + sourceFields: string[] | undefined; /** *- * The source fields to which a particular task is applied. + * The operation to be performed on the provided source fields. *
*/ - sourceFields: string[] | undefined; + connectorOperator?: ConnectorOperator; /** *@@ -3414,10 +3510,10 @@ export interface Task { /** *
- * The operation to be performed on the provided source fields. + * Specifies the particular task implementation that Amazon AppFlow performs. *
*/ - connectorOperator?: ConnectorOperator; + taskType: TaskType | string | undefined; /** *@@ -3446,14 +3542,7 @@ export enum DataPullMode { export interface ScheduledTriggerProperties { /** *
- * Specifies the time zone used when referring to the date and time of a scheduled-triggered flow. - *
- */ - timezone?: string; - - /** - *
- * The scheduling expression that determines when and how often the rule runs.
+ * The scheduling expression that determines the rate at which the schedule will run, for example rate(5minutes)
.
*
+ * Specifies the time zone used when referring to the date and time of a scheduled-triggered flow. + *
+ */ + timezone?: string; } export namespace ScheduledTriggerProperties { @@ -3516,17 +3612,17 @@ export namespace TriggerProperties { export interface TriggerConfig { /** *
- * Specifies the configuration details of a schedule-triggered flow as defined by the user. Currently, these settings only apply to the Scheduled
trigger type.
+ * Specifies the type of flow trigger. This can be OnDemand
, Scheduled
, or Event
.
*
- * Specifies the type of flow trigger. This can be OnDemand
, Scheduled
, or Event
.
+ * Specifies the configuration details of a schedule-triggered flow as defined by the user. Currently, these settings only apply to the Scheduled
trigger type.
*
- * The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key. + * The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only. *
*/ - kmsArn?: string; + flowName: string | undefined; /** *- * The configuration that controls how Amazon AppFlow places data in the destination connector. + * A description of the flow you want to create. *
*/ - destinationFlowConfigList: DestinationFlowConfig[] | undefined; + description?: string; /** *- * The configuration that controls how Amazon AppFlow retrieves data from the source connector. + * The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key. *
*/ - sourceFlowConfig: SourceFlowConfig | undefined; + kmsArn?: string; /** *@@ -3566,31 +3662,31 @@ export interface CreateFlowRequest { /** *
- * A description of the flow you want to create. + * The configuration that controls how Amazon AppFlow retrieves data from the source connector. *
*/ - description?: string; + sourceFlowConfig: SourceFlowConfig | undefined; /** *- * The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only. + * The configuration that controls how Amazon AppFlow places data in the destination connector. *
*/ - flowName: string | undefined; + destinationFlowConfigList: DestinationFlowConfig[] | undefined; /** *- * The tags used to organize, track, or control access for your flow. + * A list of tasks that Amazon AppFlow performs while transferring the data in the flow run. *
*/ - tags?: { [key: string]: string }; + tasks: Task[] | undefined; /** *- * A list of tasks that Amazon AppFlow performs while transferring the data in the flow run. + * The tags used to organize, track, or control access for your flow. *
*/ - tasks: Task[] | undefined; + tags?: { [key: string]: string }; } export namespace CreateFlowRequest { @@ -3611,17 +3707,17 @@ export enum FlowStatus { export interface CreateFlowResponse { /** *- * Indicates the current status of the flow. - *
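
A condensed sketch of a CreateFlowRequest in the new field order, reading a hypothetical Salesforce object on the documented example schedule and landing it in Upsolver. Field names that sit outside this hunk (object, scheduleExpression, destinationField) are taken from the service model; all values are illustrative:

```ts
import { CreateFlowRequest } from "./models_0";

const request: CreateFlowRequest = {
  flowName: "salesforce-accounts-to-upsolver",
  triggerConfig: {
    triggerType: "Scheduled",
    triggerProperties: { Scheduled: { scheduleExpression: "rate(5minutes)" } },
  },
  sourceFlowConfig: {
    connectorType: "Salesforce",
    sourceConnectorProperties: { Salesforce: { object: "Account" } },
  },
  destinationFlowConfigList: [
    {
      connectorType: "Upsolver",
      destinationConnectorProperties: {
        Upsolver: {
          bucketName: "upsolver-appflow-output",   // hypothetical bucket
          s3OutputFormatConfig: { prefixConfig: {} },
        },
      },
    },
  ],
  tasks: [
    {
      taskType: "Map",
      sourceFields: ["Name"],
      destinationField: "Name",
      connectorOperator: { Salesforce: "NO_OP" },
    },
  ],
};
```
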
+ * The flow's Amazon Resource Name (ARN). + * */ - flowStatus?: FlowStatus | string; + flowArn?: string; /** *- * The flow's Amazon Resource Name (ARN). - *
+ * Indicates the current status of the flow. + * */ - flowArn?: string; + flowStatus?: FlowStatus | string; } export namespace CreateFlowResponse { @@ -3650,17 +3746,17 @@ export namespace ResourceNotFoundException { export interface DeleteConnectorProfileRequest { /** *
- * Indicates whether Amazon AppFlow should delete the profile, even if it is currently in use in one or more flows.
+ * The name of the connector profile. The name is unique for each ConnectorProfile
in your account.
*
- * The name of the connector profile. The name is unique for each ConnectorProfile
in your account.
+ * Indicates whether Amazon AppFlow should delete the profile, even if it is currently in use in one or more flows.
*
- * The pagination token for the next page of data. - *
- */ - nextToken?: string; - /** *
* The name of the connector profile. The name is unique for each ConnectorProfile
in the AWS account.
@@ -3779,6 +3868,13 @@ export interface DescribeConnectorProfilesRequest {
*
+ * The pagination token for the next page of data. + *
+ */ + nextToken?: string; } export namespace DescribeConnectorProfilesRequest { @@ -3812,17 +3908,17 @@ export namespace DescribeConnectorProfilesResponse { export interface DescribeConnectorsRequest { /** *- * The pagination token for the next page of data. + * The type of connector, such as Salesforce, Amplitude, and so on. *
*/ - nextToken?: string; + connectorTypes?: (ConnectorType | string)[]; /** *- * The type of connector, such as Salesforce, Amplitude, and so on. + * The pagination token for the next page of data. *
*/ - connectorTypes?: (ConnectorType | string)[]; + nextToken?: string; } export namespace DescribeConnectorsRequest { @@ -3882,24 +3978,24 @@ export enum ExecutionStatus { export interface ExecutionDetails { /** *- * Specifies the time of the most recent flow run. + * Describes the details of the most recent flow run. *
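
The DescribeConnectorsRequest reorder keeps both members optional; a request can filter on the new connector type and paginate as before. A small sketch (the token is made up):

```ts
import { DescribeConnectorsRequest } from "./models_0";

const request: DescribeConnectorsRequest = {
  connectorTypes: ["Upsolver"],
  nextToken: "token-from-previous-page",   // hypothetical pagination token
};
```
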
*/ - mostRecentExecutionTime?: Date; + mostRecentExecutionMessage?: string; /** *- * Specifies the status of the most recent flow run. + * Specifies the time of the most recent flow run. *
*/ - mostRecentExecutionStatus?: ExecutionStatus | string; + mostRecentExecutionTime?: Date; /** *- * Describes the details of the most recent flow run. + * Specifies the status of the most recent flow run. *
*/ - mostRecentExecutionMessage?: string; + mostRecentExecutionStatus?: ExecutionStatus | string; } export namespace ExecutionDetails { @@ -3911,24 +4007,24 @@ export namespace ExecutionDetails { export interface DescribeFlowResponse { /** *- * A description of the flow. + * The flow's Amazon Resource Name (ARN). *
*/ - description?: string; + flowArn?: string; /** *- * The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only. + * A description of the flow. *
*/ - flowName?: string; + description?: string; /** *- * Indicates the current status of the flow. - *
+ * The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only. + * */ - flowStatus?: FlowStatus | string; + flowName?: string; /** *@@ -3939,17 +4035,17 @@ export interface DescribeFlowResponse { /** *
- * The configuration that controls how Amazon AppFlow transfers data to the destination connector. - *
+ * Indicates the current status of the flow. + * */ - destinationFlowConfigList?: DestinationFlowConfig[]; + flowStatus?: FlowStatus | string; /** *- * A list of tasks that Amazon AppFlow performs while transferring the data in the flow run. - *
+ * Contains an error message if the flow status is in a suspended or error state. This applies only to scheduled or event-triggered flows. + * */ - tasks?: Task[]; + flowStatusMessage?: string; /** *@@ -3960,31 +4056,31 @@ export interface DescribeFlowResponse { /** *
- * Specifies the user name of the account that performed the most recent update. + * The configuration that controls how Amazon AppFlow transfers data to the destination connector. *
*/ - lastUpdatedBy?: string; + destinationFlowConfigList?: DestinationFlowConfig[]; /** *- * The flow's Amazon Resource Name (ARN). + * Describes the details of the most recent flow run. *
*/ - flowArn?: string; + lastRunExecutionDetails?: ExecutionDetails; /** *- * The ARN of the user who created the flow. + * The trigger settings that determine how and when the flow runs. *
*/ - createdBy?: string; + triggerConfig?: TriggerConfig; /** *- * Contains an error message if the flow status is in a suspended or error state. This applies only to scheduled or event-triggered flows. - *
+ * A list of tasks that Amazon AppFlow performs while transferring the data in the flow run. + * */ - flowStatusMessage?: string; + tasks?: Task[]; /** *@@ -4002,17 +4098,17 @@ export interface DescribeFlowResponse { /** *
- * The trigger settings that determine how and when the flow runs. + * The ARN of the user who created the flow. *
*/ - triggerConfig?: TriggerConfig; + createdBy?: string; /** *- * Describes the details of the most recent flow run. + * Specifies the user name of the account that performed the most recent update. *
*/ - lastRunExecutionDetails?: ExecutionDetails; + lastUpdatedBy?: string; /** *@@ -4029,13 +4125,6 @@ export namespace DescribeFlowResponse { } export interface DescribeFlowExecutionRecordsRequest { - /** - *
- * The pagination token for the next page of data. - *
- */ - nextToken?: string; - /** ** The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only. @@ -4049,6 +4138,13 @@ export interface DescribeFlowExecutionRecordsRequest { *
*/ maxResults?: number; + + /** + *+ * The pagination token for the next page of data. + *
+ */ + nextToken?: string; } export namespace DescribeFlowExecutionRecordsRequest { @@ -4106,17 +4202,17 @@ export interface ExecutionResult { /** *- * The number of records processed in the flow run. + * The total number of bytes written as a result of the flow run. *
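With maxResults and the relocated nextToken, DescribeFlowExecutionRecords can be paged through in a loop. A sketch under the assumption of an existing flow; the page size is arbitrary.

import { AppflowClient, DescribeFlowExecutionRecordsCommand } from "@aws-sdk/client-appflow";

const client = new AppflowClient({ region: "us-east-1" }); // region is an assumption

async function listAllExecutions(flowName: string): Promise<void> {
  let nextToken: string | undefined;
  do {
    // Each page returns up to maxResults flow runs plus a token for the next page.
    const page = await client.send(
      new DescribeFlowExecutionRecordsCommand({ flowName, maxResults: 20, nextToken })
    );
    for (const record of page.flowExecutions ?? []) {
      console.log(record.executionId, record.executionStatus, record.executionResult?.recordsProcessed);
    }
    nextToken = page.nextToken;
  } while (nextToken);
}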
*/ - recordsProcessed?: number; + bytesWritten?: number; /** *- * The total number of bytes written as a result of the flow run. + * The number of records processed in the flow run. *
*/ - bytesWritten?: number; + recordsProcessed?: number; } export namespace ExecutionResult { @@ -4133,24 +4229,24 @@ export namespace ExecutionResult { export interface ExecutionRecord { /** *- * Specifies the flow run status and whether it is in progress, has completed successfully, or has failed. + * Specifies the identifier of the given flow run. *
*/ - executionStatus?: ExecutionStatus | string; + executionId?: string; /** *- * Specifies the identifier of the given flow run. + * Specifies the flow run status and whether it is in progress, has completed successfully, or has failed. *
*/ - executionId?: string; + executionStatus?: ExecutionStatus | string; /** *- * Specifies the time of the most recent update. + * Describes the result of the given flow run. *
*/ - lastUpdatedAt?: Date; + executionResult?: ExecutionResult; /** *@@ -4161,10 +4257,10 @@ export interface ExecutionRecord { /** *
- * Describes the result of the given flow run. + * Specifies the time of the most recent update. *
*/ - executionResult?: ExecutionResult; + lastUpdatedAt?: Date; } export namespace ExecutionRecord { @@ -4203,24 +4299,24 @@ export namespace DescribeFlowExecutionRecordsResponse { export interface FlowDefinition { /** *- * A user-entered description of the flow. + * The flow's Amazon Resource Name (ARN). *
*/ - description?: string; + flowArn?: string; /** *- * The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only. + * A user-entered description of the flow. *
*/ - flowName?: string; + description?: string; /** *
- * Specifies the type of flow trigger. This can be OnDemand
, Scheduled
, or Event
.
+ * The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens (-) only.
*
@@ -4231,31 +4327,31 @@ export interface FlowDefinition { /** *
- * Specifies the destination connector type, such as Salesforce, Amazon S3, Amplitude, and so on. + * Specifies the source connector type, such as Salesforce, Amazon S3, Amplitude, and so on. *
*/ - destinationConnectorType?: ConnectorType | string; + sourceConnectorType?: ConnectorType | string; /** *- * Specifies the source connector type, such as Salesforce, Amazon S3, Amplitude, and so on. + * Specifies the destination connector type, such as Salesforce, Amazon S3, Amplitude, and so on. *
*/ - sourceConnectorType?: ConnectorType | string; + destinationConnectorType?: ConnectorType | string; /** *
- * The tags used to organize, track, or control access for your flow.
+ * Specifies the type of flow trigger. This can be OnDemand
, Scheduled
, or Event
.
*
- * Describes the details of the most recent flow run. + * Specifies when the flow was created. *
*/ - lastRunExecutionDetails?: ExecutionDetails; + createdAt?: Date; /** *@@ -4273,24 +4369,24 @@ export interface FlowDefinition { /** *
- * The flow's Amazon Resource Name (ARN). + * Specifies the account user name that most recently updated the flow. *
*/ - flowArn?: string; + lastUpdatedBy?: string; /** *- * Specifies the account user name that most recently updated the flow. + * The tags used to organize, track, or control access for your flow. *
*/ - lastUpdatedBy?: string; + tags?: { [key: string]: string }; /** *- * Specifies when the flow was created. + * Describes the details of the most recent flow run. *
*/ - createdAt?: Date; + lastRunExecutionDetails?: ExecutionDetails; } export namespace FlowDefinition { @@ -4346,17 +4442,17 @@ export namespace ListConnectorEntitiesResponse { export interface ListFlowsRequest { /** *- * The pagination token for next page of data. - *
+ * Specifies the maximum number of items that should be returned in the result set. + * */ - nextToken?: string; + maxResults?: number; /** *- * Specifies the maximum number of items that should be returned in the result set. - *
+ * The pagination token for next page of data. + * */ - maxResults?: number; + nextToken?: string; } export namespace ListFlowsRequest { @@ -4435,10 +4531,10 @@ export namespace StartFlowRequest { export interface StartFlowResponse { /** *- * Returns the internal execution ID of an on-demand flow when the flow is started. For scheduled or event-triggered flows, this value is null. - *
+ * The flow's Amazon Resource Name (ARN). + * */ - executionId?: string; + flowArn?: string; /** *@@ -4450,10 +4546,10 @@ export interface StartFlowResponse { /** *
- * The flow's Amazon Resource Name (ARN). - *
+ * Returns the internal execution ID of an on-demand flow when the flow is started. For scheduled or event-triggered flows, this value is null. + * */ - flowArn?: string; + executionId?: string; } export namespace StartFlowResponse { @@ -4480,17 +4576,17 @@ export namespace StopFlowRequest { export interface StopFlowResponse { /** *- * Indicates the current status of the flow. + * The flow's Amazon Resource Name (ARN). *
*/ - flowStatus?: FlowStatus | string; + flowArn?: string; /** *- * The flow's Amazon Resource Name (ARN). + * Indicates the current status of the flow. *
*/ - flowArn?: string; + flowStatus?: FlowStatus | string; } export namespace StopFlowResponse { @@ -4579,24 +4675,24 @@ export namespace UntagResourceResponse { export interface UpdateConnectorProfileRequest { /** *
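StartFlowResponse and StopFlowResponse now both lead with the flow ARN. A short sketch of starting an on-demand flow and deactivating a scheduled one; the flow names are placeholders.

import { AppflowClient, StartFlowCommand, StopFlowCommand } from "@aws-sdk/client-appflow";

const client = new AppflowClient({ region: "us-east-1" }); // region is an assumption

async function startOnDemand(flowName: string): Promise<void> {
  const started = await client.send(new StartFlowCommand({ flowName }));
  // executionId is only returned for on-demand flows; it is null for scheduled or event-triggered flows.
  console.log(started.flowArn, started.flowStatus, started.executionId);
}

async function deactivate(flowName: string): Promise<void> {
  // StopFlow deactivates a scheduled or event-triggered flow rather than interrupting a single run.
  const stopped = await client.send(new StopFlowCommand({ flowName }));
  console.log(stopped.flowArn, stopped.flowStatus);
}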
- * Indicates the connection mode and if it is public or private.
+ * The name of the connector profile. The name is unique for each ConnectorProfile</code>
in the AWS Account.
*
- * Defines the connector-specific profile configuration and credentials. + * Indicates the connection mode and whether it is public or private. </p>
*/ - connectorProfileConfig: ConnectorProfileConfig | undefined; + connectionMode: ConnectionMode | string | undefined; /** *
- * The name of the connector profile and is unique for each ConnectorProfile
in the AWS Account.
+ * Defines the connector-specific profile configuration and credentials.
*
+ * A description of the flow. + *
+ */ + description?: string; + /** ** The trigger settings that determine how and when the flow runs. @@ -4655,13 +4758,6 @@ export interface UpdateFlowRequest { *
*/ tasks: Task[] | undefined; - - /** - *- * A description of the flow. - *
- */ - description?: string; } export namespace UpdateFlowRequest { diff --git a/clients/client-appflow/protocols/Aws_restJson1.ts b/clients/client-appflow/protocols/Aws_restJson1.ts index 3629102fffc0..126bcac37c05 100644 --- a/clients/client-appflow/protocols/Aws_restJson1.ts +++ b/clients/client-appflow/protocols/Aws_restJson1.ts @@ -143,6 +143,9 @@ import { TriggerProperties, TriggerType, UnsupportedOperationException, + UpsolverDestinationProperties, + UpsolverMetadata, + UpsolverS3OutputFormatConfig, ValidationException, VeevaConnectorProfileCredentials, VeevaConnectorProfileProperties, @@ -2529,6 +2532,9 @@ const serializeAws_restJson1DestinationConnectorProperties = ( ...(input.Snowflake !== undefined && { Snowflake: serializeAws_restJson1SnowflakeDestinationProperties(input.Snowflake, context), }), + ...(input.Upsolver !== undefined && { + Upsolver: serializeAws_restJson1UpsolverDestinationProperties(input.Upsolver, context), + }), }; }; @@ -3112,6 +3118,34 @@ const serializeAws_restJson1TriggerProperties = (input: TriggerProperties, conte }; }; +const serializeAws_restJson1UpsolverDestinationProperties = ( + input: UpsolverDestinationProperties, + context: __SerdeContext +): any => { + return { + ...(input.bucketName !== undefined && { bucketName: input.bucketName }), + ...(input.bucketPrefix !== undefined && { bucketPrefix: input.bucketPrefix }), + ...(input.s3OutputFormatConfig !== undefined && { + s3OutputFormatConfig: serializeAws_restJson1UpsolverS3OutputFormatConfig(input.s3OutputFormatConfig, context), + }), + }; +}; + +const serializeAws_restJson1UpsolverS3OutputFormatConfig = ( + input: UpsolverS3OutputFormatConfig, + context: __SerdeContext +): any => { + return { + ...(input.aggregationConfig !== undefined && { + aggregationConfig: serializeAws_restJson1AggregationConfig(input.aggregationConfig, context), + }), + ...(input.fileType !== undefined && { fileType: input.fileType }), + ...(input.prefixConfig !== undefined && { + prefixConfig: serializeAws_restJson1PrefixConfig(input.prefixConfig, context), + }), + }; +}; + const serializeAws_restJson1VeevaConnectorProfileCredentials = ( input: VeevaConnectorProfileCredentials, context: __SerdeContext @@ -3364,6 +3398,10 @@ const deserializeAws_restJson1ConnectorMetadata = (output: any, context: __Serde output.Trendmicro !== undefined && output.Trendmicro !== null ? deserializeAws_restJson1TrendmicroMetadata(output.Trendmicro, context) : undefined, + Upsolver: + output.Upsolver !== undefined && output.Upsolver !== null + ? deserializeAws_restJson1UpsolverMetadata(output.Upsolver, context) + : undefined, Veeva: output.Veeva !== undefined && output.Veeva !== null ? deserializeAws_restJson1VeevaMetadata(output.Veeva, context) @@ -3555,6 +3593,10 @@ const deserializeAws_restJson1DestinationConnectorProperties = ( output.Snowflake !== undefined && output.Snowflake !== null ? deserializeAws_restJson1SnowflakeDestinationProperties(output.Snowflake, context) : undefined, + Upsolver: + output.Upsolver !== undefined && output.Upsolver !== null + ? 
deserializeAws_restJson1UpsolverDestinationProperties(output.Upsolver, context) + : undefined, } as any; }; @@ -4347,6 +4389,41 @@ const deserializeAws_restJson1TriggerTypeList = (output: any, context: __SerdeCo return (output || []).map((entry: any) => entry); }; +const deserializeAws_restJson1UpsolverDestinationProperties = ( + output: any, + context: __SerdeContext +): UpsolverDestinationProperties => { + return { + bucketName: output.bucketName !== undefined && output.bucketName !== null ? output.bucketName : undefined, + bucketPrefix: output.bucketPrefix !== undefined && output.bucketPrefix !== null ? output.bucketPrefix : undefined, + s3OutputFormatConfig: + output.s3OutputFormatConfig !== undefined && output.s3OutputFormatConfig !== null + ? deserializeAws_restJson1UpsolverS3OutputFormatConfig(output.s3OutputFormatConfig, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1UpsolverMetadata = (output: any, context: __SerdeContext): UpsolverMetadata => { + return {} as any; +}; + +const deserializeAws_restJson1UpsolverS3OutputFormatConfig = ( + output: any, + context: __SerdeContext +): UpsolverS3OutputFormatConfig => { + return { + aggregationConfig: + output.aggregationConfig !== undefined && output.aggregationConfig !== null + ? deserializeAws_restJson1AggregationConfig(output.aggregationConfig, context) + : undefined, + fileType: output.fileType !== undefined && output.fileType !== null ? output.fileType : undefined, + prefixConfig: + output.prefixConfig !== undefined && output.prefixConfig !== null + ? deserializeAws_restJson1PrefixConfig(output.prefixConfig, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1VeevaConnectorProfileProperties = ( output: any, context: __SerdeContext diff --git a/clients/client-application-insights/ApplicationInsights.ts b/clients/client-application-insights/ApplicationInsights.ts index 9c09a3b7c9f7..b04ef11639a8 100644 --- a/clients/client-application-insights/ApplicationInsights.ts +++ b/clients/client-application-insights/ApplicationInsights.ts @@ -133,14 +133,14 @@ import { import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; /** - * Amazon CloudWatch Application Insights for .NET and SQL Server is a service that
- * helps you detect common problems with your .NET and SQL Server-based applications. It
+ *
Amazon CloudWatch Application Insights is a service that + * helps you detect common problems with your applications. It * enables you to pinpoint the source of issues in your applications (built with technologies * such as Microsoft IIS, .NET, and Microsoft SQL Server), by providing key insights into * detected problems.
- *After you onboard your application, CloudWatch Application Insights for .NET and SQL - * Server identifies, recommends, and sets up metrics and logs. It continuously analyzes and + *
After you onboard your application, CloudWatch Application Insights identifies,
+ * recommends, and sets up metrics and logs. It continuously analyzes and
* correlates your metrics and logs for unusual behavior to surface actionable problems with
* your application. For example, if your application is slow and unresponsive and leading to
* HTTP 500 errors in your Application Load Balancer (ALB), Application Insights informs you
diff --git a/clients/client-application-insights/ApplicationInsightsClient.ts b/clients/client-application-insights/ApplicationInsightsClient.ts
index 0c159be485dd..4cbde13df17e 100644
--- a/clients/client-application-insights/ApplicationInsightsClient.ts
+++ b/clients/client-application-insights/ApplicationInsightsClient.ts
@@ -269,14 +269,14 @@ export type ApplicationInsightsClientResolvedConfig = __SmithyResolvedConfigurat
HostHeaderResolvedConfig;
/**
- *
Amazon CloudWatch Application Insights for .NET and SQL Server is a service that
- * helps you detect common problems with your .NET and SQL Server-based applications. It
+ *
Amazon CloudWatch Application Insights is a service that + * helps you detect common problems with your applications. It * enables you to pinpoint the source of issues in your applications (built with technologies * such as Microsoft IIS, .NET, and Microsoft SQL Server), by providing key insights into * detected problems.
- *After you onboard your application, CloudWatch Application Insights for .NET and SQL - * Server identifies, recommends, and sets up metrics and logs. It continuously analyzes and + *
After you onboard your application, CloudWatch Application Insights identifies, + * recommends, and sets up metrics and logs. It continuously analyzes and * correlates your metrics and logs for unusual behavior to surface actionable problems with * your application. For example, if your application is slow and unresponsive and leading to * HTTP 500 errors in your Application Load Balancer (ALB), Application Insights informs you diff --git a/clients/client-application-insights/models/models_0.ts b/clients/client-application-insights/models/models_0.ts index 98d658a9c95f..186c453e3c3f 100644 --- a/clients/client-application-insights/models/models_0.ts +++ b/clients/client-application-insights/models/models_0.ts @@ -1,7 +1,38 @@ import { SENSITIVE_STRING, SmithyException as __SmithyException } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer } from "@aws-sdk/types"; -export type Tier = "DEFAULT" | "DOT_NET_CORE" | "DOT_NET_WEB" | "DOT_NET_WORKER" | "SQL_SERVER"; +/** + *
+ * User does not have permissions to perform this action. + *
+ */ +export interface AccessDeniedException extends __SmithyException, $MetadataBearer { + name: "AccessDeniedException"; + $fault: "client"; + Message?: string; +} + +export namespace AccessDeniedException { + export const filterSensitiveLog = (obj: AccessDeniedException): any => ({ + ...obj, + }); +} + +export type Tier = + | "CUSTOM" + | "DEFAULT" + | "DOT_NET_CORE" + | "DOT_NET_WEB" + | "DOT_NET_WEB_TIER" + | "DOT_NET_WORKER" + | "JAVA_JMX" + | "MYSQL" + | "ORACLE" + | "POSTGRESQL" + | "SQL_SERVER" + | "SQL_SERVER_ALWAYSON_AVAILABILITY_GROUP"; + +export type OsType = "LINUX" | "WINDOWS"; /** *Describes a standalone resource or similarly grouped resources that the application is made @@ -14,19 +45,40 @@ export interface ApplicationComponent { ComponentName?: string; /** - *
The stack tier of the application component.
+ *+ * If logging is supported for the resource type, indicates whether the component has configured logs to be monitored. + *
*/ - Tier?: Tier | string; + ComponentRemarks?: string; /** *The resource type. Supported resource types include EC2 instances, Auto Scaling group, Classic ELB, Application ELB, and SQS Queue.
*/ ResourceType?: string; + /** + *+ * The operating system of the component. + *
+ */ + OsType?: OsType | string; + + /** + *The stack tier of the application component.
+ */ + Tier?: Tier | string; + /** *Indicates whether the application component is monitored.
*/ Monitor?: boolean; + + /** + *+ * Workloads detected in the application component. + *
+ */ + DetectedWorkload?: { [key: string]: { [key: string]: string } }; } export namespace ApplicationComponent { @@ -44,6 +96,19 @@ export interface ApplicationInfo { */ ResourceGroupName?: string; + /** + *The lifecycle of the application.
+ */ + LifeCycle?: string; + + /** + *+ * The SNS topic provided to Application Insights that is associated to the created opsItems to receive SNS notifications + * for opsItem updates. + *
+ */ + OpsItemSNSTopicArn?: string; + /** ** Indicates whether Application Insights will create opsItems for any problem detected by Application @@ -52,6 +117,13 @@ export interface ApplicationInfo { */ OpsCenterEnabled?: boolean; + /** + *
+ * Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated
, failed deployment
, and others.
+ *
The issues on the user side that block Application Insights from successfully monitoring * an application. Example remarks include:
@@ -65,26 +137,6 @@ export interface ApplicationInfo { * */ Remarks?: string; - - /** - *
- * Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated
, failed deployment
, and others.
- *
- * The SNS topic provided to Application Insights that is associated to the created opsItems to receive SNS notifications - * for opsItem updates. - *
- */ - OpsItemSNSTopicArn?: string; - - /** - *The lifecycle of the application.
- */ - LifeCycle?: string; } export namespace ApplicationInfo { @@ -108,9 +160,13 @@ export namespace BadRequestException { }); } -export type CloudWatchEventSource = "CODE_DEPLOY" | "EC2" | "HEALTH"; +export type CloudWatchEventSource = "CODE_DEPLOY" | "EC2" | "HEALTH" | "RDS"; -export type ConfigurationEventResourceType = "CLOUDFORMATION" | "CLOUDWATCH_ALARM" | "SSM_ASSOCIATION"; +export type ConfigurationEventResourceType = + | "CLOUDFORMATION" + | "CLOUDWATCH_ALARM" + | "CLOUDWATCH_LOG" + | "SSM_ASSOCIATION"; export type ConfigurationEventStatus = "ERROR" | "INFO" | "WARN"; @@ -122,10 +178,17 @@ export type ConfigurationEventStatus = "ERROR" | "INFO" | "WARN"; export interface ConfigurationEvent { /** *- * The name of the resource Application Insights attempted to configure. + * The resource monitored by Application Insights. *
*/ - EventResourceName?: string; + MonitoredResourceARN?: string; + + /** + *+ * The status of the configuration update event. Possible values include INFO, WARN, and ERROR. + *
+ */ + EventStatus?: ConfigurationEventStatus | string; /** *@@ -150,17 +213,10 @@ export interface ConfigurationEvent { /** *
- * The resource monitored by Application Insights. - *
- */ - MonitoredResourceARN?: string; - - /** - *- * The status of the configuration update event. Possible values include INFO, WARN, and ERROR. + * The name of the resource Application Insights attempted to configure. *
*/ - EventStatus?: ConfigurationEventStatus | string; + EventResourceName?: string; } export namespace ConfigurationEvent { @@ -197,18 +253,18 @@ export namespace ConfigurationEvent { * */ export interface Tag { + /** + *One part of a key-value pair that defines a tag. The maximum length of a tag key is + * 128 characters. The minimum length is 1 character.
+ */ + Key: string | undefined; + /** *The optional part of a key-value pair that defines a tag. The maximum length of a tag * value is 256 characters. The minimum length is 0 characters. If you don't want an * application to have a specific tag value, don't specify a value for this parameter.
*/ Value: string | undefined; - - /** - *One part of a key-value pair that defines a tag. The maximum length of a tag key is - * 128 characters. The minimum length is 1 character.
- */ - Key: string | undefined; } export namespace Tag { @@ -219,11 +275,9 @@ export namespace Tag { export interface CreateApplicationRequest { /** - *
- * Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated
, failed deployment
, and others.
- *
The name of the resource group.
*/ - CWEMonitorEnabled?: boolean; + ResourceGroupName: string | undefined; /** *@@ -234,16 +288,18 @@ export interface CreateApplicationRequest { /** *
- * The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to
- * receive notifications for updates to the opsItem.
+ * Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated
, failed deployment
, and others.
*
The name of the resource group.
+ *+ * The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to + * receive notifications for updates to the opsItem. + *
*/ - ResourceGroupName: string | undefined; + OpsItemSNSTopicArn?: string; /** *List of tags to add to the application. @@ -350,14 +406,14 @@ export namespace ValidationException { export interface CreateComponentRequest { /** - *
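CreateApplicationRequest keeps ResourceGroupName as its only required member, with the OpsCenter, CloudWatch Events, and SNS options shown above. A minimal sketch with placeholder names and ARNs; the region is also an assumption.

import { ApplicationInsightsClient, CreateApplicationCommand } from "@aws-sdk/client-application-insights";

const client = new ApplicationInsightsClient({ region: "us-east-1" }); // region is an assumption

async function onboardApplication(): Promise<void> {
  const { ApplicationInfo } = await client.send(
    new CreateApplicationCommand({
      ResourceGroupName: "my-resource-group", // placeholder
      OpsCenterEnabled: true, // create opsItems for detected problems
      CWEMonitorEnabled: true, // also listen to CloudWatch events such as failed deployments
      OpsItemSNSTopicArn: "arn:aws:sns:us-east-1:123456789012:appinsights-updates", // placeholder
      Tags: [{ Key: "team", Value: "platform" }],
    })
  );
  console.log(ApplicationInfo?.LifeCycle, ApplicationInfo?.Remarks);
}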
The name of the component.
+ *The name of the resource group.
*/ - ComponentName: string | undefined; + ResourceGroupName: string | undefined; /** - *The name of the resource group.
+ *The name of the component.
*/ - ResourceGroupName: string | undefined; + ComponentName: string | undefined; /** *The list of resource ARNs that belong to the component.
@@ -381,9 +437,9 @@ export namespace CreateComponentResponse { export interface CreateLogPatternRequest { /** - *The log pattern.
+ *The name of the resource group.
*/ - Pattern: string | undefined; + ResourceGroupName: string | undefined; /** *The name of the log pattern set.
@@ -391,19 +447,21 @@ export interface CreateLogPatternRequest { PatternSetName: string | undefined; /** - *Rank of the log pattern.
+ *The name of the log pattern.
*/ - Rank: number | undefined; + PatternName: string | undefined; /** - *The name of the log pattern.
+ *The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.
*/ - PatternName: string | undefined; + Pattern: string | undefined; /** - *The name of the resource group.
+ *Rank of the log pattern. Must be a value between 1
and 1,000,000
. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1
will be the first to get matched to a log line. A pattern of rank 1,000,000
will be last to get matched. When you configure custom log patterns from the console, a Low
severity pattern translates to a 750,000
rank. A Medium
severity pattern translates to a 500,000
rank. And a High
severity pattern translates to a 250,000
rank.
+ * Rank values less than 1
or greater than 1,000,000
are reserved for AWS-provided patterns.
+ *
The name of the log pattern. A log pattern name can contains at many as 50 characters, and it cannot - * be empty. The characters can be Unicode letters, digits or one of the following symbols: period, dash, underscore.
+ *The name of the log pattern. A log pattern name can contain as many as 30 characters, and it cannot + * be empty. The characters can be Unicode letters, digits, or one of the following symbols: period, dash, underscore.
*/ - PatternName?: string; + PatternSetName?: string; /** - *Rank of the log pattern.
+ *The name of the log pattern. A log pattern name can contain as many as 50 characters, and it cannot + * be empty. The characters can be Unicode letters, digits, or one of the following symbols: period, dash, underscore.
*/ - Rank?: number; + PatternName?: string; /** - *A regular expression that defines the log pattern. A log pattern can contains at many as 50 characters, and it cannot - * be empty.
+ *A regular expression that defines the log pattern. A log pattern can contain as many as 50 characters, and it cannot + * be empty. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.
*/ Pattern?: string; /** - *The name of the log pattern. A log pattern name can contains at many as 30 characters, and it cannot - * be empty. The characters can be Unicode letters, digits or one of the following symbols: period, dash, underscore.
+ *Rank of the log pattern. Must be a value between 1
and 1,000,000
. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1
will be the first to get matched to a log line. A pattern of rank 1,000,000
will be last to get matched. When you configure custom log patterns from the console, a Low
severity pattern translates to a 750,000
rank. A Medium
severity pattern translates to a 500,000
rank. And a High
severity pattern translates to a 250,000
rank.
+ * Rank values less than 1
or greater than 1,000,000
are reserved for AWS-provided patterns.
+ *
The name of the resource group.
+ */ + ResourceGroupName: string | undefined; + /** *The name of the log pattern set.
*/ @@ -521,11 +586,6 @@ export interface DeleteLogPatternRequest { *The name of the log pattern.
*/ PatternName: string | undefined; - - /** - *The name of the resource group.
- */ - ResourceGroupName: string | undefined; } export namespace DeleteLogPatternRequest { @@ -587,16 +647,16 @@ export namespace DescribeComponentRequest { } export interface DescribeComponentResponse { - /** - *The list of resource ARNs that belong to the component.
- */ - ResourceList?: string[]; - /** *Describes a standalone resource or similarly grouped resources that the application is made * up of.
*/ ApplicationComponent?: ApplicationComponent; + + /** + *The list of resource ARNs that belong to the component.
+ */ + ResourceList?: string[]; } export namespace DescribeComponentResponse { @@ -624,11 +684,6 @@ export namespace DescribeComponentConfigurationRequest { } export interface DescribeComponentConfigurationResponse { - /** - *The configuration settings of the component. The value is the escaped JSON of the configuration.
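DescribeComponentResponse now returns the ApplicationComponent, including the new OsType, Tier, and DetectedWorkload members, ahead of the resource list. A hedged sketch of reading it with placeholder names:

import { ApplicationInsightsClient, DescribeComponentCommand } from "@aws-sdk/client-application-insights";

const client = new ApplicationInsightsClient({ region: "us-east-1" }); // region is an assumption

async function inspectComponent(): Promise<void> {
  const { ApplicationComponent, ResourceList } = await client.send(
    new DescribeComponentCommand({
      ResourceGroupName: "my-resource-group", // placeholder
      ComponentName: "my-component", // placeholder
    })
  );
  // Tier and OsType come from the expanded enums introduced in this change.
  console.log(ApplicationComponent?.Tier, ApplicationComponent?.OsType, ResourceList?.length);
}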
- */ - ComponentConfiguration?: string; - /** *Indicates whether the application component is monitored.
*/ @@ -641,6 +696,11 @@ export interface DescribeComponentConfigurationResponse { * */ Tier?: Tier | string; + + /** + *The configuration settings of the component. The value is the escaped JSON of the configuration.
+ */ + ComponentConfiguration?: string; } export namespace DescribeComponentConfigurationResponse { @@ -650,13 +710,6 @@ export namespace DescribeComponentConfigurationResponse { } export interface DescribeComponentConfigurationRecommendationRequest { - /** - *The tier of the application component. Supported tiers include
- * DOT_NET_CORE
, DOT_NET_WORKER
, DOT_NET_WEB
, SQL_SERVER
,
- * and DEFAULT
.
The name of the resource group.
*/ @@ -666,6 +719,13 @@ export interface DescribeComponentConfigurationRecommendationRequest { *The name of the component.
*/ ComponentName: string | undefined; + + /** + *The tier of the application component. Supported tiers include
+ * DOT_NET_CORE
, DOT_NET_WORKER
, DOT_NET_WEB
, SQL_SERVER
,
+ * and DEFAULT
.
The name of the resource group.
+ */ + ResourceGroupName: string | undefined; + /** *The name of the log pattern set.
*/ @@ -697,11 +762,6 @@ export interface DescribeLogPatternRequest { *The name of the log pattern.
*/ PatternName: string | undefined; - - /** - *The name of the resource group.
- */ - ResourceGroupName: string | undefined; } export namespace DescribeLogPatternRequest { @@ -748,199 +808,276 @@ export type LogFilter = "ERROR" | "INFO" | "WARN"; */ export interface Observation { /** - *The timestamp in the CloudWatch Logs that specifies when the matched line occurred.
+ *The ID of the observation type.
*/ - LineTime?: Date; + Id?: string; /** - *The service to which the AWS Health Event belongs, such as EC2.
+ *The time when the observation was first detected, in epoch seconds.
*/ - HealthService?: string; + StartTime?: Date; /** - *
- * The status of the CodeDeploy deployment, for example SUCCESS
or FAILURE
.
- *
The time when the observation ended, in epoch seconds.
*/ - CodeDeployState?: string; + EndTime?: Date; /** - *The deployment ID of the CodeDeploy-based observation related to the detected problem.
+ *The source type of the observation.
*/ - CodeDeployDeploymentId?: string; + SourceType?: string; /** - *- * The X-Ray request fault percentage for this node. - *
+ *The source resource ARN of the observation.
*/ - XRayFaultPercent?: number; + SourceARN?: string; /** - *The Amazon Resource Name (ARN) of the AWS Health Event-based observation.
+ *The log group name.
*/ - HealthEventArn?: string; + LogGroup?: string; /** - *The name of the observation metric.
+ *The timestamp in the CloudWatch Logs that specifies when the matched line occurred.
*/ - MetricName?: string; + LineTime?: Date; /** - *- * The X-Ray request count for this node. - *
+ *The log text of the observation.
*/ - XRayRequestCount?: number; + LogText?: string; /** - * The detail type of the CloudWatch Event-based observation, for example, EC2
- * Instance State-change Notification
.
The log filter of the observation.
*/ - CloudWatchEventDetailType?: string; + LogFilter?: LogFilter | string; /** - *The source type of the observation.
+ *The namespace of the observation metric.
*/ - SourceType?: string; + MetricNamespace?: string; /** - *- * The X-Ray request error percentage for this node. - *
+ *The name of the observation metric.
*/ - XRayErrorPercent?: number; + MetricName?: string; + + /** + *The unit of the source observation metric.
+ */ + Unit?: string; + + /** + *The value of the source observation metric.
+ */ + Value?: number; + + /** + *The ID of the CloudWatch Event-based observation related to the detected problem.
+ */ + CloudWatchEventId?: string; + + /** + *The source of the CloudWatch Event.
+ */ + CloudWatchEventSource?: CloudWatchEventSource | string; + + /** + * The detail type of the CloudWatch Event-based observation, for example, EC2
+ * Instance State-change Notification
.
The Amazon Resource Name (ARN) of the AWS Health Event-based observation.
+ */ + HealthEventArn?: string; + + /** + *The service to which the AWS Health Event belongs, such as EC2.
+ */ + HealthService?: string; + + /** + * The type of the AWS Health event, for example,
+ * AWS_EC2_POWER_CONNECTIVITY_ISSUE
.
The category of the AWS Health event, such as issue
.
The description of the AWS Health event provided by the service, such as Amazon EC2.
+ */ + HealthEventDescription?: string; + + /** + *The deployment ID of the CodeDeploy-based observation related to the detected problem.
+ */ + CodeDeployDeploymentId?: string; + /** *- * The X-Ray node request average latency for this node. + * The deployment group to which the CodeDeploy deployment belongs. *
*/ - XRayRequestAverageLatency?: number; + CodeDeployDeploymentGroup?: string; /** - *The namespace of the observation metric.
+ *
+ * The status of the CodeDeploy deployment, for example SUCCESS
or FAILURE
.
+ *
- * The state of the instance, such as STOPPING
or TERMINATING
.
+ * The CodeDeploy application to which the deployment belongs.
*
The ID of the CloudWatch Event-based observation related to the detected problem.
+ *+ * The instance group to which the CodeDeploy instance belongs. + *
*/ - CloudWatchEventId?: string; + CodeDeployInstanceGroupId?: string; /** *
- * The X-Ray request throttle percentage for this node.
+ * The state of the instance, such as STOPPING
or TERMINATING
.
*
The time when the observation was first detected, in epoch seconds.
+ *+ * The category of an RDS event. + *
*/ - StartTime?: Date; + RdsEventCategories?: string; /** - *The value of the source observation metric.
+ *+ * The message of an RDS event. + *
*/ - Value?: number; + RdsEventMessage?: string; /** *- * The type of the X-Ray node.
+ * The name of the S3 CloudWatch Event-based observation. + * */ - XRayNodeType?: string; + S3EventName?: string; /** - *The source resource ARN of the observation.
+ *+ * The Amazon Resource Name (ARN) of the step function execution-based observation. + *
*/ - SourceARN?: string; + StatesExecutionArn?: string; /** - *The log text of the observation.
+ *+ * The Amazon Resource Name (ARN) of the step function-based observation. + *
*/ - LogText?: string; + StatesArn?: string; /** - *The ID of the observation type.
+ *+ * The status of the step function-related observation. + *
*/ - Id?: string; + StatesStatus?: string; /** - *The description of the AWS Health event provided by the service, such as Amazon EC2.
+ *+ * The input to the step function-based observation. + *
*/ - HealthEventDescription?: string; + StatesInput?: string; /** - * The type of the AWS Health event, for example,
- * AWS_EC2_POWER_CONNECTIVITY_ISSUE
.
+ * The type of EBS CloudWatch event, such as createVolume
, deleteVolume
or attachVolume
.
+ *
- * The name of the X-Ray node.
+ * The result of an EBS CloudWatch event, such as failed
or succeeded
.
*
The time when the observation ended, in epoch seconds.
+ *+ * The cause of an EBS CloudWatch event. + *
*/ - EndTime?: Date; + EbsCause?: string; /** - *The unit of the source observation metric.
+ *+ * The request ID of an EBS CloudWatch event. + *
*/ - Unit?: string; + EbsRequestId?: string; /** *- * The CodeDeploy application to which the deployment belongs. + * The X-Ray request fault percentage for this node. *
*/ - CodeDeployApplication?: string; + XRayFaultPercent?: number; /** - *The log filter of the observation.
+ *+ * The X-Ray request throttle percentage for this node. + *
*/ - LogFilter?: LogFilter | string; + XRayThrottlePercent?: number; /** *- * The instance group to which the CodeDeploy instance belongs. + * The X-Ray request error percentage for this node. *
*/ - CodeDeployInstanceGroupId?: string; + XRayErrorPercent?: number; /** - *The source of the CloudWatch Event.
+ *+ * The X-Ray request count for this node. + *
*/ - CloudWatchEventSource?: CloudWatchEventSource | string; + XRayRequestCount?: number; /** - *The log group name.
+ *+ * The X-Ray node request average latency for this node. + *
*/ - LogGroup?: string; + XRayRequestAverageLatency?: number; /** *- * The deployment group to which the CodeDeploy deployment belongs. + * The name of the X-Ray node. *
*/ - CodeDeployDeploymentGroup?: string; + XRayNodeName?: string; + + /** + *+ * The type of the X-Ray node.
+ */ + XRayNodeType?: string; } export namespace Observation { @@ -988,19 +1125,19 @@ export type Status = "IGNORE" | "PENDING" | "RESOLVED"; */ export interface Problem { /** - *The name of the problem.
+ *The ID of the problem.
*/ - Title?: string; + Id?: string; /** - *A measure of the level of impact of the problem.
+ *The name of the problem.
*/ - SeverityLevel?: SeverityLevel | string; + Title?: string; /** - *The name of the resource group affected by the problem.
+ *A detailed analysis of the problem using machine learning.
*/ - ResourceGroupName?: string; + Insights?: string; /** *The status of the problem.
@@ -1008,34 +1145,34 @@ export interface Problem { Status?: Status | string; /** - *The time when the problem ended, in epoch seconds.
+ *The resource affected by the problem.
*/ - EndTime?: Date; + AffectedResource?: string; /** - *The ID of the problem.
+ *The time when the problem started, in epoch seconds.
*/ - Id?: string; + StartTime?: Date; /** - *A detailed analysis of the problem using machine learning.
+ *The time when the problem ended, in epoch seconds.
*/ - Insights?: string; + EndTime?: Date; /** - *Feedback provided by the user about the problem.
+ *A measure of the level of impact of the problem.
*/ - Feedback?: { [key: string]: FeedbackValue | string }; + SeverityLevel?: SeverityLevel | string; /** - *The time when the problem started, in epoch seconds.
+ *The name of the resource group affected by the problem.
*/ - StartTime?: Date; + ResourceGroupName?: string; /** - *The resource affected by the problem.
+ *Feedback provided by the user about the problem.
*/ - AffectedResource?: string; + Feedback?: { [key: string]: FeedbackValue | string }; } export namespace Problem { @@ -1100,16 +1237,16 @@ export namespace DescribeProblemObservationsResponse { } export interface ListApplicationsRequest { - /** - *The token to request the next page of results.
- */ - NextToken?: string; - /** *The maximum number of results to return in a single call. To retrieve the remaining
* results, make another call with the returned NextToken
value.
The token to request the next page of results.
+ */ + NextToken?: string; } export namespace ListApplicationsRequest { @@ -1139,9 +1276,9 @@ export namespace ListApplicationsResponse { export interface ListComponentsRequest { /** - *The token to request the next page of results.
+ *The name of the resource group.
*/ - NextToken?: string; + ResourceGroupName: string | undefined; /** *The maximum number of results to return in a single call. To retrieve the remaining @@ -1150,9 +1287,9 @@ export interface ListComponentsRequest { MaxResults?: number; /** - *
The name of the resource group.
+ *The token to request the next page of results.
*/ - ResourceGroupName: string | undefined; + NextToken?: string; } export namespace ListComponentsRequest { @@ -1163,14 +1300,14 @@ export namespace ListComponentsRequest { export interface ListComponentsResponse { /** - *The token to request the next page of results.
+ *The list of application components.
*/ - NextToken?: string; + ApplicationComponentList?: ApplicationComponent[]; /** - *The list of application components.
+ *The token to request the next page of results.
*/ - ApplicationComponentList?: ApplicationComponent[]; + NextToken?: string; } export namespace ListComponentsResponse { @@ -1181,18 +1318,25 @@ export namespace ListComponentsResponse { export interface ListConfigurationHistoryRequest { /** - *The NextToken
value returned from a previous paginated ListConfigurationHistory
request where
- * MaxResults
was used and the results exceeded the value of that parameter. Pagination
- * continues from the end of the previous results that returned the NextToken
value. This
- * value is null
when there are no more results to return.
Resource group to which the application belongs.
*/ - NextToken?: string; + ResourceGroupName?: string; /** *The start time of the event.
*/ StartTime?: Date; + /** + *The end time of the event.
+ */ + EndTime?: Date; + + /** + *The status of the configuration update event. Possible values include INFO, WARN, and ERROR.
+ */ + EventStatus?: ConfigurationEventStatus | string; + /** * The maximum number of results returned by ListConfigurationHistory
in
* paginated output. When this parameter is used, ListConfigurationHistory
@@ -1205,19 +1349,12 @@ export interface ListConfigurationHistoryRequest {
MaxResults?: number;
/**
- *
The end time of the event.
- */ - EndTime?: Date; - - /** - *The status of the configuration update event. Possible values include INFO, WARN, and ERROR.
- */ - EventStatus?: ConfigurationEventStatus | string; - - /** - *Resource group to which the application belongs.
+ *The NextToken
value returned from a previous paginated ListConfigurationHistory
request where
+ * MaxResults
was used and the results exceeded the value of that parameter. Pagination
+ * continues from the end of the previous results that returned the NextToken
value. This
+ * value is null
when there are no more results to return.
The list of configuration events and their corresponding details.
+ */ + EventList?: ConfigurationEvent[]; + /** *The NextToken
value to include in a future
* ListConfigurationHistory
request. When the results of a
@@ -1235,11 +1377,6 @@ export interface ListConfigurationHistoryResponse {
* there are no more results to return.
The list of configuration events and their corresponding details.
- */ - EventList?: ConfigurationEvent[]; } export namespace ListConfigurationHistoryResponse { @@ -1250,14 +1387,14 @@ export namespace ListConfigurationHistoryResponse { export interface ListLogPatternsRequest { /** - *The token to request the next page of results.
+ *The name of the resource group.
*/ - NextToken?: string; + ResourceGroupName: string | undefined; /** - *The name of the resource group.
+ *The name of the log pattern set.
*/ - ResourceGroupName: string | undefined; + PatternSetName?: string; /** *The maximum number of results to return in a single call. To retrieve the remaining @@ -1266,9 +1403,9 @@ export interface ListLogPatternsRequest { MaxResults?: number; /** - *
The name of the log pattern set.
+ *The token to request the next page of results.
*/ - PatternSetName?: string; + NextToken?: string; } export namespace ListLogPatternsRequest { @@ -1284,15 +1421,15 @@ export interface ListLogPatternsResponse { ResourceGroupName?: string; /** - *The token used to retrieve the next page of results. This value is null
- * when there are no more results to return.
The list of log patterns.
*/ - NextToken?: string; + LogPatterns?: LogPattern[]; /** - *The list of log patterns.
+ *The token used to retrieve the next page of results. This value is null
+ * when there are no more results to return.
The token to request the next page of results.
+ *The name of the resource group.
*/ - NextToken?: string; + ResourceGroupName: string | undefined; /** *The maximum number of results to return in a single call. To retrieve the remaining @@ -1314,9 +1451,9 @@ export interface ListLogPatternSetsRequest { MaxResults?: number; /** - *
The name of the resource group.
+ *The token to request the next page of results.
*/ - ResourceGroupName: string | undefined; + NextToken?: string; } export namespace ListLogPatternSetsRequest { @@ -1332,15 +1469,15 @@ export interface ListLogPatternSetsResponse { ResourceGroupName?: string; /** - *The token used to retrieve the next page of results. This value is null
- * when there are no more results to return.
The list of log pattern sets.
*/ - NextToken?: string; + LogPatternSets?: string[]; /** - *The list of log pattern sets.
+ *The token used to retrieve the next page of results. This value is null
+ * when there are no more results to return.
The token to request the next page of results.
- */ - NextToken?: string; - - /** - *The maximum number of results to return in a single call. To retrieve the remaining
- * results, make another call with the returned NextToken
value.
The name of the resource group.
*/ @@ -1377,6 +1503,17 @@ export interface ListProblemsRequest { * past seven days are returned. */ EndTime?: Date; + + /** + *The maximum number of results to return in a single call. To retrieve the remaining
+ * results, make another call with the returned NextToken
value.
The token to request the next page of results.
+ */ + NextToken?: string; } export namespace ListProblemsRequest { @@ -1434,6 +1571,11 @@ export namespace ListTagsForResourceResponse { } export interface TagResourceRequest { + /** + *The Amazon Resource Name (ARN) of the application that you want to add one or more tags to.
+ */ + ResourceARN: string | undefined; + /** *A list of tags that to add to the application. A tag consists of a required
* tag key (Key
) and an associated tag value (Value
). The maximum
@@ -1441,11 +1583,6 @@ export interface TagResourceRequest {
* characters.
The Amazon Resource Name (ARN) of the application that you want to add one or more tags to.
- */ - ResourceARN: string | undefined; } export namespace TagResourceRequest { @@ -1513,6 +1650,11 @@ export namespace UntagResourceResponse { } export interface UpdateApplicationRequest { + /** + *The name of the resource group.
+ */ + ResourceGroupName: string | undefined; + /** *
* When set to true
, creates opsItems for any problems detected on an application.
@@ -1520,6 +1662,13 @@ export interface UpdateApplicationRequest {
*/
OpsCenterEnabled?: boolean;
+ /**
+ *
+ * Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated
, failed deployment
, and others.
+ *
* The SNS topic provided to Application Insights that is associated to the created opsItem. Allows you to @@ -1532,18 +1681,6 @@ export interface UpdateApplicationRequest { * Disassociates the SNS topic from the opsItem created for detected problems.
*/ RemoveSNSTopic?: boolean; - - /** - *
- * Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as instance terminated
, failed deployment
, and others.
- *
The name of the resource group.
- */ - ResourceGroupName: string | undefined; } export namespace UpdateApplicationRequest { @@ -1567,14 +1704,14 @@ export namespace UpdateApplicationResponse { export interface UpdateComponentRequest { /** - *The name of the component.
+ *The name of the resource group.
*/ - ComponentName: string | undefined; + ResourceGroupName: string | undefined; /** - *The list of resource ARNs that belong to the component.
+ *The name of the component.
*/ - ResourceList?: string[]; + ComponentName: string | undefined; /** *The new name of the component.
@@ -1582,9 +1719,9 @@ export interface UpdateComponentRequest { NewComponentName?: string; /** - *The name of the resource group.
+ *The list of resource ARNs that belong to the component.
*/ - ResourceGroupName: string | undefined; + ResourceList?: string[]; } export namespace UpdateComponentRequest { @@ -1602,6 +1739,11 @@ export namespace UpdateComponentResponse { } export interface UpdateComponentConfigurationRequest { + /** + *The name of the resource group.
+ */ + ResourceGroupName: string | undefined; + /** *The name of the component.
*/ @@ -1612,14 +1754,6 @@ export interface UpdateComponentConfigurationRequest { */ Monitor?: boolean; - /** - *The configuration settings of the component. The value is the escaped JSON of the configuration. For
- * more information about the JSON format, see Working with JSON.
- * You can send a request to DescribeComponentConfigurationRecommendation
to see the recommended configuration for a component. For the complete
- * format of the component configuration file, see Component Configuration.
The tier of the application component. Supported tiers include DOT_NET_WORKER
,
* DOT_NET_WEB
, DOT_NET_CORE
, SQL_SERVER
, and DEFAULT
.
The name of the resource group.
+ *The configuration settings of the component. The value is the escaped JSON of the configuration. For
+ * more information about the JSON format, see Working with JSON.
+ * You can send a request to DescribeComponentConfigurationRecommendation
to see the recommended configuration for a component. For the complete
+ * format of the component configuration file, see Component Configuration.
Rank of the log pattern.
+ *The name of the log pattern set.
*/ - Rank?: number; + PatternSetName: string | undefined; /** - *The log pattern.
+ *The name of the log pattern.
*/ - Pattern?: string; + PatternName: string | undefined; /** - *The name of the log pattern.
+ *The log pattern. The pattern must be DFA compatible. Patterns that utilize forward lookahead or backreference constructions are not supported.
*/ - PatternName: string | undefined; + Pattern?: string; /** - *The name of the log pattern set.
+ *Rank of the log pattern. Must be a value between 1
and 1,000,000
. The patterns are sorted by rank, so we recommend that you set your highest priority patterns with the lowest rank. A pattern of rank 1
will be the first to get matched to a log line. A pattern of rank 1,000,000
will be last to get matched. When you configure custom log patterns from the console, a Low
severity pattern translates to a 750,000
rank. A Medium
severity pattern translates to a 500,000
rank. And a High
severity pattern translates to a 250,000
rank.
+ * Rank values less than 1
or greater than 1,000,000
are reserved for AWS-provided patterns.
+ *
The successfully created log pattern.
+ *The name of the resource group.
*/ - LogPattern?: LogPattern; + ResourceGroupName?: string; /** - *The name of the resource group.
+ *The successfully created log pattern.
*/ - ResourceGroupName?: string; + LogPattern?: LogPattern; } export namespace UpdateLogPatternResponse { diff --git a/clients/client-application-insights/protocols/Aws_json1_1.ts b/clients/client-application-insights/protocols/Aws_json1_1.ts index b0f83c64e039..b398ac21b400 100644 --- a/clients/client-application-insights/protocols/Aws_json1_1.ts +++ b/clients/client-application-insights/protocols/Aws_json1_1.ts @@ -50,6 +50,7 @@ import { } from "../commands/UpdateComponentConfigurationCommand"; import { UpdateLogPatternCommandInput, UpdateLogPatternCommandOutput } from "../commands/UpdateLogPatternCommand"; import { + AccessDeniedException, ApplicationComponent, ApplicationInfo, BadRequestException, @@ -109,6 +110,7 @@ import { TagResourceRequest, TagResourceResponse, TagsAlreadyExistException, + Tier, TooManyTagsException, UntagResourceRequest, UntagResourceResponse, @@ -513,6 +515,14 @@ const deserializeAws_json1_1CreateApplicationCommandError = async ( const errorTypeParts: String = parsedOutput.body["__type"].split("#"); errorCode = errorTypeParts[1] === undefined ? errorTypeParts[0] : errorTypeParts[1]; switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.applicationinsights#AccessDeniedException": + response = { + ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InternalServerException": case "com.amazonaws.applicationinsights#InternalServerException": response = { @@ -2440,6 +2450,21 @@ const deserializeAws_json1_1UpdateLogPatternCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +const deserializeAws_json1_1AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): PromiseTo attach an Application Load Balancer or a Network Load Balancer, use the AttachLoadBalancerTargetGroups API operation instead.
+ *To attach an Application Load Balancer, Network Load Balancer, or Gateway Load + * Balancer, use the AttachLoadBalancerTargetGroups API operation + * instead.
*Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling * registers the running instances with these Classic Load Balancers.
*To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach the load balancer from the Auto Scaling * group, call the DetachLoadBalancers API.
- *For more information, see Attaching a load - * balancer to your Auto Scaling group in the - * Amazon EC2 Auto Scaling User Guide.
+ *For more information, see Elastic Load Balancing and + * Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*/ public attachLoadBalancers( args: AttachLoadBalancersCommandInput, @@ -372,13 +373,24 @@ export class AutoScaling extends AutoScalingClient { /** *Attaches one or more target groups to the specified Auto Scaling group.
+ *This operation is used with the following load balancer types:
+ *Application Load Balancer - Operates at the application layer (layer 7) and + * supports HTTP and HTTPS.
+ *Network Load Balancer - Operates at the transport layer (layer 4) and + * supports TCP, TLS, and UDP.
+ *Gateway Load Balancer - Operates at the network layer (layer 3).
+ *To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To detach the target group from * the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.
- *With Application Load Balancers and Network Load Balancers, instances are registered - * as targets with a target group. With Classic Load Balancers, instances are registered - * with the load balancer. For more information, see Attaching a load - * balancer to your Auto Scaling group in the - * Amazon EC2 Auto Scaling User Guide.
+ *For more information, see Elastic Load Balancing and + * Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*/ public attachLoadBalancerTargetGroups( args: AttachLoadBalancerTargetGroupsCommandInput, @@ -1278,7 +1290,7 @@ export class AutoScaling extends AutoScalingClient { /** *Describes the load balancers for the specified Auto Scaling group.
*This operation describes only Classic Load Balancers. If you have Application Load - * Balancers or Network Load Balancers, use the DescribeLoadBalancerTargetGroups API instead.
+ * Balancers, Network Load Balancers, or Gateway Load Balancers, use the DescribeLoadBalancerTargetGroups API instead. */ public describeLoadBalancers( args: DescribeLoadBalancersCommandInput, @@ -1653,7 +1665,7 @@ export class AutoScaling extends AutoScalingClient { /** *Detaches one or more Classic Load Balancers from the specified Auto Scaling group.
*This operation detaches only Classic Load Balancers. If you have Application Load - * Balancers or Network Load Balancers, use the DetachLoadBalancerTargetGroups API instead.
+ * Balancers, Network Load Balancers, or Gateway Load Balancers, use the DetachLoadBalancerTargetGroups API instead. *When you detach a load balancer, it enters the Removing
state while
* deregistering the instances in the group. When all instances are deregistered, then you
* can no longer describe the load balancer using the DescribeLoadBalancers API call. The instances remain running.
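Reviewer note (not part of this changeset): the detach path sketched for completeness; the load balancer shows the Removing state while instances are deregistered, and the instances keep running. Names and region are placeholders.

import { AutoScalingClient, DetachLoadBalancersCommand } from "@aws-sdk/client-auto-scaling";

const client = new AutoScalingClient({ region: "us-east-1" }); // placeholder region

(async () => {
  await client.send(
    new DetachLoadBalancersCommand({
      AutoScalingGroupName: "my-asg", // placeholder
      LoadBalancerNames: ["my-classic-elb"], // placeholder
    })
  );
})();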
Attaches one or more target groups to the specified Auto Scaling group.
+ *This operation is used with the following load balancer types:
+ *Application Load Balancer - Operates at the application layer (layer 7) and + * supports HTTP and HTTPS.
+ *Network Load Balancer - Operates at the transport layer (layer 4) and + * supports TCP, TLS, and UDP.
+ *Gateway Load Balancer - Operates at the network layer (layer 3).
+ *To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups API. To detach the target group from * the Auto Scaling group, call the DetachLoadBalancerTargetGroups API.
- *With Application Load Balancers and Network Load Balancers, instances are registered - * as targets with a target group. With Classic Load Balancers, instances are registered - * with the load balancer. For more information, see Attaching a load - * balancer to your Auto Scaling group in the - * Amazon EC2 Auto Scaling User Guide.
+ *For more information, see Elastic Load Balancing and + * Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*/ export class AttachLoadBalancerTargetGroupsCommand extends $Command< AttachLoadBalancerTargetGroupsCommandInput, diff --git a/clients/client-auto-scaling/commands/AttachLoadBalancersCommand.ts b/clients/client-auto-scaling/commands/AttachLoadBalancersCommand.ts index 99fe89fa1fda..12b4f95a97a1 100644 --- a/clients/client-auto-scaling/commands/AttachLoadBalancersCommand.ts +++ b/clients/client-auto-scaling/commands/AttachLoadBalancersCommand.ts @@ -22,15 +22,16 @@ export type AttachLoadBalancersCommandOutput = AttachLoadBalancersResultType & _ /** *To attach an Application Load Balancer or a Network Load Balancer, use the AttachLoadBalancerTargetGroups API operation instead.
+ *To attach an Application Load Balancer, Network Load Balancer, or Gateway Load + * Balancer, use the AttachLoadBalancerTargetGroups API operation + * instead.
*Attaches one or more Classic Load Balancers to the specified Auto Scaling group. Amazon EC2 Auto Scaling * registers the running instances with these Classic Load Balancers.
*To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers API. To detach the load balancer from the Auto Scaling * group, call the DetachLoadBalancers API.
- *For more information, see Attaching a load - * balancer to your Auto Scaling group in the - * Amazon EC2 Auto Scaling User Guide.
+ *For more information, see Elastic Load Balancing and + * Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
*/ export class AttachLoadBalancersCommand extends $Command< AttachLoadBalancersCommandInput, diff --git a/clients/client-auto-scaling/commands/DescribeLoadBalancersCommand.ts b/clients/client-auto-scaling/commands/DescribeLoadBalancersCommand.ts index 6f68b723c798..24fdcc145fe8 100644 --- a/clients/client-auto-scaling/commands/DescribeLoadBalancersCommand.ts +++ b/clients/client-auto-scaling/commands/DescribeLoadBalancersCommand.ts @@ -23,7 +23,7 @@ export type DescribeLoadBalancersCommandOutput = DescribeLoadBalancersResponse & /** *Describes the load balancers for the specified Auto Scaling group.
*This operation describes only Classic Load Balancers. If you have Application Load - * Balancers or Network Load Balancers, use the DescribeLoadBalancerTargetGroups API instead.
+ * Balancers, Network Load Balancers, or Gateway Load Balancers, use the DescribeLoadBalancerTargetGroups API instead. */ export class DescribeLoadBalancersCommand extends $Command< DescribeLoadBalancersCommandInput, diff --git a/clients/client-auto-scaling/commands/DetachLoadBalancersCommand.ts b/clients/client-auto-scaling/commands/DetachLoadBalancersCommand.ts index cac0a93e3a01..dbaa0558c13c 100644 --- a/clients/client-auto-scaling/commands/DetachLoadBalancersCommand.ts +++ b/clients/client-auto-scaling/commands/DetachLoadBalancersCommand.ts @@ -23,7 +23,7 @@ export type DetachLoadBalancersCommandOutput = DetachLoadBalancersResultType & _ /** *Detaches one or more Classic Load Balancers from the specified Auto Scaling group.
*This operation detaches only Classic Load Balancers. If you have Application Load - * Balancers or Network Load Balancers, use the DetachLoadBalancerTargetGroups API instead.
+ * Balancers, Network Load Balancers, or Gateway Load Balancers, use the DetachLoadBalancerTargetGroups API instead. *When you detach a load balancer, it enters the Removing
state while
* deregistering the instances in the group. When all instances are deregistered, then you
* can no longer describe the load balancer using the DescribeLoadBalancers API call. The instances remain running.
The Amazon Resource Names (ARN) of the target groups. You can specify up to 10 target - * groups.
+ * groups. To get the ARN of a target group, use the Elastic Load Balancing DescribeTargetGroups API operation. */ TargetGroupARNs: string[] | undefined; } @@ -544,15 +544,17 @@ export namespace CompleteLifecycleActionType { export interface LaunchTemplateSpecification { /** *The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created
- * using the Amazon EC2 CreateLaunchTemplate API. You must specify either a
- * LaunchTemplateId
or a LaunchTemplateName
.
Conditional: You must specify either a LaunchTemplateId
or a
+ * LaunchTemplateName
.
The name of the launch template. To get the template name, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created
- * using the Amazon EC2 CreateLaunchTemplate API. You must specify either a
- * LaunchTemplateId
or a LaunchTemplateName
.
Conditional: You must specify either a LaunchTemplateId
or a
+ * LaunchTemplateName
.
Indicates how to allocate instances across Spot Instance pools. If the allocation
- * strategy is lowest-price
, the Auto Scaling group launches instances using the Spot
- * pools with the lowest price, and evenly allocates your instances across the number of
- * Spot pools that you specify. If the allocation strategy is
- * capacity-optimized
, the Auto Scaling group launches instances using Spot pools
- * that are optimally chosen based on the available Spot capacity. Defaults to
+ * strategy is capacity-optimized
(recommended), the Auto Scaling group launches
+ * instances using Spot pools that are optimally chosen based on the available Spot
+ * capacity. If the allocation strategy is lowest-price
, the Auto Scaling group
+ * launches instances using the Spot pools with the lowest price, and evenly allocates your
+ * instances across the number of Spot pools that you specify. Defaults to
* lowest-price
if not specified.
The number of Spot Instance pools across which to allocate your Spot Instances. The
- * Spot pools are determined from the different instance types in the overrides. Defaults
- * to 2 if not specified. Valid only when the Spot allocation strategy is
- * lowest-price
.
Valid Range: Minimum value of 1. Maximum value of 20.
+ * Spot pools are determined from the different instance types in the overrides. Valid only + * when the Spot allocation strategy islowest-price
. Value must be in the
+ * range of 1 to 20. Defaults to 2 if not specified.
*/
SpotInstancePools?: number;
/**
* The maximum price per unit hour that you are willing to pay for a Spot Instance. If - * you leave the value of this parameter blank (which is the default), the maximum Spot - * price is set at the On-Demand price. To remove a value that you previously set, include - * the parameter but leave the value blank.
+ * you leave the value at its default (empty), Amazon EC2 Auto Scaling uses the On-Demand price as the + * maximum Spot price. To remove a value that you previously set, include the property but + * specify an empty string ("") for the value. */ SpotMaxPrice?: string; } @@ -769,8 +770,8 @@ export interface LaunchTemplateOverrides { * fulfilled, even if this results in an overage. For example, if there are 2 units * remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only provision an instance with a *WeightedCapacity
of 5 units, the instance is provisioned, and the
- * desired capacity is exceeded by 3 units. For more information, see Instance weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
- * Valid Range: Minimum value of 1. Maximum value of 999.
+ * desired capacity is exceeded by 3 units. For more information, see Instance weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. + * Value must be in the range of 1 to 999. */ WeightedCapacity?: string; @@ -930,9 +931,6 @@ export interface CreateAutoScalingGroupType { * Auto Scaling groups with multiple * instance types and purchase options in the Amazon EC2 Auto Scaling User * Guide. - *Conditional: You must specify either a launch template (LaunchTemplate
or
- * MixedInstancesPolicy
) or a launch configuration
- * (LaunchConfigurationName
or InstanceId
).
A list of Classic Load Balancers associated with this Auto Scaling group. For
- * Application Load Balancers and Network Load Balancers, specify
- * TargetGroupARNs
instead.
TargetGroupARNs
property instead.
*/
LoadBalancerNames?: string[];
/**
* The Amazon Resource Names (ARN) of the target groups to associate with the Auto Scaling group. * Instances are registered as targets in a target group, and traffic is routed to the - * target group. For more information, see Elastic Load - * Balancing and Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
+ * target group. For more information, see Elastic Load Balancing and + * Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. */ TargetGroupARNs?: string[]; @@ -1488,7 +1486,7 @@ export interface CreateLaunchConfigurationType { * parameter todedicated
.
* If you specify PlacementTenancy
, you must specify at least one subnet for
* VPCZoneIdentifier
when you create your group.
For more information, see Configuring instance tenancy with Amazon EC2 Auto Scaling in the + *
For more information, see Configuring instance tenancy with Amazon EC2 Auto Scaling in the * Amazon EC2 Auto Scaling User Guide.
*Valid Values: default
| dedicated
*
InService
.
* If you attach a load balancer to an existing Auto Scaling group, the initial state is
* Adding
. The state transitions to Added
after all instances
- * in the group are registered with the load balancer. If Elastic Load Balancing health
- * checks are enabled for the load balancer, the state transitions to
- * InService
after at least one instance in the group passes the health
- * check. If EC2 health checks are enabled instead, the load balancer remains in the
- * Added
state.
InService
after at least
+ * one instance in the group passes the health check. If EC2 health checks are enabled
+ * instead, the load balancer remains in the Added
state.
*/
export interface LoadBalancerState {
/**
@@ -2836,9 +2833,8 @@ export interface LoadBalancerState {
*
* Removing
- The instances in the group are being deregistered from
- * the load balancer. If connection draining is enabled, Elastic Load Balancing
- * waits for in-flight requests to complete before deregistering the
- * instances.
@@ -2906,10 +2902,10 @@ export namespace DescribeLoadBalancerTargetGroupsRequest { *
Describes the state of a target group.
*If you attach a target group to an existing Auto Scaling group, the initial state is
* Adding
. The state transitions to Added
after all Auto Scaling
- * instances are registered with the target group. If Elastic Load Balancing health checks
- * are enabled, the state transitions to InService
after at least one Auto Scaling
- * instance passes the health check. If EC2 health checks are enabled instead, the target
- * group remains in the Added
state.
InService
after at least one Auto Scaling instance passes the
+ * health check. If EC2 health checks are enabled instead, the target group remains in the
+ * Added
state.
*/
export interface LoadBalancerTargetGroupState {
/**
@@ -2938,8 +2934,8 @@ export interface LoadBalancerTargetGroupState {
*
* Removing
- The Auto Scaling instances are being deregistered from the
- * target group. If connection draining is enabled, Elastic Load Balancing waits
- * for in-flight requests to complete before deregistering the instances.
diff --git a/clients/client-batch/Batch.ts b/clients/client-batch/Batch.ts index 728b9362effb..496c344d2c47 100644 --- a/clients/client-batch/Batch.ts +++ b/clients/client-batch/Batch.ts @@ -83,23 +83,22 @@ import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; /** *
AWS Batch enables you to run batch computing workloads on the AWS Cloud. Batch computing is a common way for * developers, scientists, and engineers to access large amounts of compute resources, and AWS Batch removes the - * undifferentiated heavy lifting of configuring and managing the required infrastructure. AWS Batch will be familiar - * to users of traditional batch computing software. This service can efficiently provision resources in response to - * jobs submitted in order to eliminate capacity constraints, reduce compute costs, and deliver results - * quickly.
+ * undifferentiated heavy lifting of configuring and managing the required infrastructure. AWS Batch will be familiar to + * users of traditional batch computing software. This service can efficiently provision resources in response to jobs + * submitted in order to eliminate capacity constraints, reduce compute costs, and deliver results quickly. *As a fully managed service, AWS Batch enables developers, scientists, and engineers to run batch computing * workloads of any scale. AWS Batch automatically provisions compute resources and optimizes the workload distribution - * based on the quantity and scale of the workloads. With AWS Batch, there is no need to install or manage batch - * computing software, which allows you to focus on analyzing results and solving problems. AWS Batch reduces - * operational complexities, saves time, and reduces costs, which makes it easy for developers, scientists, and - * engineers to run their batch jobs in the AWS Cloud.
+ * based on the quantity and scale of the workloads. With AWS Batch, there is no need to install or manage batch computing + * software, which allows you to focus on analyzing results and solving problems. AWS Batch reduces operational + * complexities, saves time, and reduces costs, which makes it easy for developers, scientists, and engineers to run + * their batch jobs in the AWS Cloud. */ export class Batch extends BatchClient { /** *Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED
, PENDING
, or
* RUNNABLE
state are cancelled. Jobs that have progressed to STARTING
or
- * RUNNING
are not cancelled (but the API operation still succeeds, even if no job is cancelled);
- * these jobs must be terminated with the TerminateJob operation.
RUNNING
are not cancelled (but the API operation still succeeds, even if no job is cancelled); these
+ * jobs must be terminated with the TerminateJob operation.
*/
public cancelJob(args: CancelJobCommandInput, options?: __HttpHandlerOptions): PromiseIn a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources * within the environment. This is based on the compute resource specification that you define or the launch template that you - * specify when you create the compute environment. You can choose to use Amazon EC2 On-Demand Instances or Spot Instances - * in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch - * when the Spot Instance price is below a specified percentage of the On-Demand price.
+ * specify when you create the compute environment. You can choose to use Amazon EC2 On-Demand Instances or Spot Instances in + * your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the + * Spot Instance price is below a specified percentage of the On-Demand price. *Multi-node parallel jobs are not supported on Spot Instances.
*AWS Batch does not upgrade the AMIs in a compute environment after it is created (for example, when a newer * version of the Amazon ECS-optimized AMI is available). You are responsible for the management of the guest operating @@ -193,11 +192,11 @@ export class Batch extends BatchClient { } /** - *
Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to - * the queue and assign an order of preference for the compute environments.
+ *Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to the + * queue and assign an order of preference for the compute environments.
*You also set a priority to the job queue that determines the order in which the AWS Batch scheduler places jobs - * onto its associated compute environments. For example, if a compute environment is associated with more than one - * job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute + * onto its associated compute environments. For example, if a compute environment is associated with more than one job + * queue, the job queue with a higher priority is given preference for scheduling jobs to that compute * environment.
*/ public createJobQueue( @@ -365,8 +364,8 @@ export class Batch extends BatchClient { } /** - *Describes a list of job definitions. You can specify a status
(such as ACTIVE
) to
- * only return job definitions that match that status.
Describes a list of job definitions. You can specify a status
(such as ACTIVE
) to only
+ * return job definitions that match that status.
Associates the specified tags to a resource with the specified resourceArn
. If existing tags on a
- * resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags
- * associated with that resource are deleted as well. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job
+ * resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags
+ * associated with that resource are deleted as well. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job
* queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
AWS Batch enables you to run batch computing workloads on the AWS Cloud. Batch computing is a common way for * developers, scientists, and engineers to access large amounts of compute resources, and AWS Batch removes the - * undifferentiated heavy lifting of configuring and managing the required infrastructure. AWS Batch will be familiar - * to users of traditional batch computing software. This service can efficiently provision resources in response to - * jobs submitted in order to eliminate capacity constraints, reduce compute costs, and deliver results - * quickly.
+ * undifferentiated heavy lifting of configuring and managing the required infrastructure. AWS Batch will be familiar to + * users of traditional batch computing software. This service can efficiently provision resources in response to jobs + * submitted in order to eliminate capacity constraints, reduce compute costs, and deliver results quickly. *As a fully managed service, AWS Batch enables developers, scientists, and engineers to run batch computing * workloads of any scale. AWS Batch automatically provisions compute resources and optimizes the workload distribution - * based on the quantity and scale of the workloads. With AWS Batch, there is no need to install or manage batch - * computing software, which allows you to focus on analyzing results and solving problems. AWS Batch reduces - * operational complexities, saves time, and reduces costs, which makes it easy for developers, scientists, and - * engineers to run their batch jobs in the AWS Cloud.
+ * based on the quantity and scale of the workloads. With AWS Batch, there is no need to install or manage batch computing + * software, which allows you to focus on analyzing results and solving problems. AWS Batch reduces operational + * complexities, saves time, and reduces costs, which makes it easy for developers, scientists, and engineers to run + * their batch jobs in the AWS Cloud. */ export class BatchClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-batch/commands/CancelJobCommand.ts b/clients/client-batch/commands/CancelJobCommand.ts index 96400bf1945c..52b6932f6714 100644 --- a/clients/client-batch/commands/CancelJobCommand.ts +++ b/clients/client-batch/commands/CancelJobCommand.ts @@ -23,8 +23,8 @@ export type CancelJobCommandOutput = CancelJobResponse & __MetadataBearer; /** *Cancels a job in an AWS Batch job queue. Jobs that are in the SUBMITTED
, PENDING
, or
* RUNNABLE
state are cancelled. Jobs that have progressed to STARTING
or
- * RUNNING
are not cancelled (but the API operation still succeeds, even if no job is cancelled);
- * these jobs must be terminated with the TerminateJob operation.
RUNNING
are not cancelled (but the API operation still succeeds, even if no job is cancelled); these
+ * jobs must be terminated with the TerminateJob operation.
*/
export class CancelJobCommand extends $Command<
CancelJobCommandInput,
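Reviewer note (not part of this changeset): a minimal sketch of the cancel/terminate distinction documented above; the job ID, reason, and region are placeholders.

import { BatchClient, CancelJobCommand, TerminateJobCommand } from "@aws-sdk/client-batch";

const batch = new BatchClient({ region: "us-east-1" }); // placeholder region

(async () => {
  // Cancels the job only while it is SUBMITTED, PENDING, or RUNNABLE; the call still succeeds otherwise.
  await batch.send(
    new CancelJobCommand({
      jobId: "example-job-id", // placeholder
      reason: "Cancelling duplicate submission", // placeholder
    })
  );

  // Jobs that already reached STARTING or RUNNING have to be terminated instead.
  await batch.send(
    new TerminateJobCommand({
      jobId: "example-job-id", // placeholder
      reason: "Terminating running job", // placeholder
    })
  );
})();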
diff --git a/clients/client-batch/commands/CreateComputeEnvironmentCommand.ts b/clients/client-batch/commands/CreateComputeEnvironmentCommand.ts
index 97d97572ea7c..97e79dcd9441 100644
--- a/clients/client-batch/commands/CreateComputeEnvironmentCommand.ts
+++ b/clients/client-batch/commands/CreateComputeEnvironmentCommand.ts
@@ -25,9 +25,9 @@ export type CreateComputeEnvironmentCommandOutput = CreateComputeEnvironmentResp
* environments.
* In a managed compute environment, AWS Batch manages the capacity and instance types of the compute resources * within the environment. This is based on the compute resource specification that you define or the launch template that you - * specify when you create the compute environment. You can choose to use Amazon EC2 On-Demand Instances or Spot Instances - * in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch - * when the Spot Instance price is below a specified percentage of the On-Demand price.
+ * specify when you create the compute environment. You can choose to use Amazon EC2 On-Demand Instances or Spot Instances in + * your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the + * Spot Instance price is below a specified percentage of the On-Demand price. *Multi-node parallel jobs are not supported on Spot Instances.
*AWS Batch does not upgrade the AMIs in a compute environment after it is created (for example, when a newer * version of the Amazon ECS-optimized AMI is available). You are responsible for the management of the guest operating diff --git a/clients/client-batch/commands/CreateJobQueueCommand.ts b/clients/client-batch/commands/CreateJobQueueCommand.ts index fc323cb50dc6..7308ec986aef 100644 --- a/clients/client-batch/commands/CreateJobQueueCommand.ts +++ b/clients/client-batch/commands/CreateJobQueueCommand.ts @@ -21,11 +21,11 @@ export type CreateJobQueueCommandInput = CreateJobQueueRequest; export type CreateJobQueueCommandOutput = CreateJobQueueResponse & __MetadataBearer; /** - *
Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to - * the queue and assign an order of preference for the compute environments.
+ *Creates an AWS Batch job queue. When you create a job queue, you associate one or more compute environments to the + * queue and assign an order of preference for the compute environments.
*You also set a priority to the job queue that determines the order in which the AWS Batch scheduler places jobs - * onto its associated compute environments. For example, if a compute environment is associated with more than one - * job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute + * onto its associated compute environments. For example, if a compute environment is associated with more than one job + * queue, the job queue with a higher priority is given preference for scheduling jobs to that compute * environment.
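Reviewer note (not part of this changeset): a sketch of creating a queue with the priority and compute-environment ordering described above; the queue name and compute environment ARN are placeholders.

import { BatchClient, CreateJobQueueCommand } from "@aws-sdk/client-batch";

const batch = new BatchClient({ region: "us-east-1" }); // placeholder region

(async () => {
  // Higher priority values win when several queues share the same compute environment.
  await batch.send(
    new CreateJobQueueCommand({
      jobQueueName: "high-priority", // placeholder
      state: "ENABLED",
      priority: 10,
      computeEnvironmentOrder: [
        {
          order: 1,
          computeEnvironment:
            "arn:aws:batch:us-east-1:123456789012:compute-environment/example-ce", // placeholder ARN
        },
      ],
    })
  );
})();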
*/ export class CreateJobQueueCommand extends $Command< diff --git a/clients/client-batch/commands/DescribeJobDefinitionsCommand.ts b/clients/client-batch/commands/DescribeJobDefinitionsCommand.ts index 37375ddeac75..f8f72d8fe2be 100644 --- a/clients/client-batch/commands/DescribeJobDefinitionsCommand.ts +++ b/clients/client-batch/commands/DescribeJobDefinitionsCommand.ts @@ -21,8 +21,8 @@ export type DescribeJobDefinitionsCommandInput = DescribeJobDefinitionsRequest; export type DescribeJobDefinitionsCommandOutput = DescribeJobDefinitionsResponse & __MetadataBearer; /** - *Describes a list of job definitions. You can specify a status
(such as ACTIVE
) to
- * only return job definitions that match that status.
Describes a list of job definitions. You can specify a status
(such as ACTIVE
) to only
+ * return job definitions that match that status.
Associates the specified tags to a resource with the specified resourceArn
. If existing tags on a
- * resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags
- * associated with that resource are deleted as well. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job
+ * resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags
+ * associated with that resource are deleted as well. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job
* queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
The job index within the array that is associated with this job. This parameter is returned for children of - * array jobs.
+ *The size of the array job. This parameter is returned for parent array jobs.
*/ - index?: number; + size?: number; /** - *The size of the array job. This parameter is returned for parent array jobs.
+ *The job index within the array that is associated with this job. This parameter is returned for children of + * array jobs.
*/ - size?: number; + index?: number; } export namespace ArrayPropertiesSummary { @@ -76,11 +76,6 @@ export namespace ArrayPropertiesSummary { *An object representing the elastic network interface for a multi-node parallel job node.
*/ export interface NetworkInterface { - /** - *The private IPv4 address for the network interface.
- */ - privateIpv4Address?: string; - /** *The attachment ID for the network interface.
*/ @@ -90,6 +85,11 @@ export interface NetworkInterface { *The private IPv6 address for the network interface.
*/ ipv6Address?: string; + + /** + *The private IPv4 address for the network interface.
+ */ + privateIpv4Address?: string; } export namespace NetworkInterface { @@ -103,15 +103,15 @@ export namespace NetworkInterface { */ export interface AttemptContainerDetail { /** - *The Amazon Resource Name (ARN) of the Amazon ECS task that is associated with the job attempt. Each container attempt receives a
- * task ARN when they reach the STARTING
status.
The Amazon Resource Name (ARN) of the Amazon ECS container instance that hosts the job attempt.
*/ - taskArn?: string; + containerInstanceArn?: string; /** - *The Amazon Resource Name (ARN) of the Amazon ECS container instance that hosts the job attempt.
+ *The Amazon Resource Name (ARN) of the Amazon ECS task that is associated with the job attempt. Each container attempt receives a task
+ * ARN when they reach the STARTING
status.
The exit code for the job attempt. A non-zero exit code is considered a failure.
@@ -148,28 +148,26 @@ export namespace AttemptContainerDetail { */ export interface AttemptDetail { /** - *A short, human-readable string to provide additional details about the current status of the job - * attempt.
+ *Details about the container in this job attempt.
*/ - statusReason?: string; + container?: AttemptContainerDetail; /** - *The Unix timestamp (in milliseconds) for when the attempt was stopped (when the attempt transitioned from the
- * RUNNING
state to a terminal state, such as SUCCEEDED
or
- * FAILED
).
The Unix timestamp (in milliseconds) for when the attempt was started (when the attempt transitioned from the
+ * STARTING
state to the RUNNING
state).
Details about the container in this job attempt.
+ *The Unix timestamp (in milliseconds) for when the attempt was stopped (when the attempt transitioned from the
+ * RUNNING
state to a terminal state, such as SUCCEEDED
or FAILED
).
The Unix timestamp (in milliseconds) for when the attempt was started (when the attempt transitioned from the
- * STARTING
state to the RUNNING
state).
A short, human-readable string to provide additional details about the current status of the job attempt.
*/ - startedAt?: number; + statusReason?: string; } export namespace AttemptDetail { @@ -179,17 +177,17 @@ export namespace AttemptDetail { } export interface CancelJobRequest { + /** + *The AWS Batch job ID of the job to cancel.
+ */ + jobId: string | undefined; + /** *A message to attach to the job that explains the reason for canceling it. This message is returned by future * DescribeJobs operations on the job. This message is also recorded in the AWS Batch activity * logs.
*/ reason: string | undefined; - - /** - *The AWS Batch job ID of the job to cancel.
- */ - jobId: string | undefined; } export namespace CancelJobRequest { @@ -207,8 +205,8 @@ export namespace CancelJobResponse { } /** - *These errors are usually caused by a client action, such as using an action or resource on behalf of a user - * that doesn't have permissions to use the action or resource, or specifying an identifier that is not valid.
+ *These errors are usually caused by a client action, such as using an action or resource on behalf of a user that + * doesn't have permissions to use the action or resource, or specifying an identifier that is not valid.
*/ export interface ClientException extends __SmithyException, $MetadataBearer { name: "ClientException"; @@ -244,17 +242,65 @@ export enum CRAllocationStrategy { } /** - *An object representing a launch template associated with a compute resource. You must specify either the - * launch template ID or launch template name in the request, but not both.
+ *Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If
+ * the Ec2Configuration
is not specified, the default is ECS_AL1
.
The image type to match with the instance type to pick an AMI. If the imageIdOverride
parameter is
+ * not specified, then a recent Amazon ECS-optimized AMI will be used.
+ * Amazon Linux
+ * 2− Default for all AWS Graviton-based instance
+ * families (for example,
+ * C6g
, M6g
, R6g
, and T4g
) and can be used for all non-GPU
+ * instance types.
+ * Amazon Linux
+ * 2 (GPU)−Default for all GPU instance
+ * families (for example
+ * P4
and G4
) and can be used for all non-AWS Graviton-based instance types.
+ * Amazon + * Linux−Default for all non-GPU, non-AWS-Graviton instance + * families. Amazon Linux is + * reaching the end-of-life of standard support. For more information, see Amazon Linux AMI.
+ *The AMI ID used for instances launched in the compute environment that match the image type. This setting
+ * overrides the imageId
set in the computeResource
object.
An object representing a launch template associated with a compute resource. You must specify either the launch + * template ID or launch template name in the request, but not both.
*/ export interface LaunchTemplateSpecification { /** - *The version number of the launch template, $Latest
, or $Default
.
If the value is $Latest
, the latest version of the launch template is used. If the value is
- * $Default
, the default version of the launch template is used.
Default: $Default
.
The ID of the launch template.
*/ - version?: string; + launchTemplateId?: string; /** *The name of the launch template.
@@ -262,9 +308,12 @@ export interface LaunchTemplateSpecification { launchTemplateName?: string; /** - *The ID of the launch template.
+ *The version number of the launch template, $Latest
, or $Default
.
If the value is $Latest
, the latest version of the launch template is used. If the value is
+ * $Default
, the default version of the launch template is used.
Default: $Default
.
An object representing an AWS Batch compute resource.
*/ export interface ComputeResource { - /** - *The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
- */ - imageId?: string; - /** *The type of compute environment: EC2
or SPOT
.
The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT
compute environment.
- * This role is required if the allocation
- * strategy set to BEST_FIT
or if the allocation strategy is not specified. For more
- * information, see Amazon EC2 Spot
- * Fleet Role in the AWS Batch User Guide.
The allocation strategy to use for the compute resource in case not enough instances of the best fitting
+ * instance type can be allocated. This could be due to availability of the instance type in the region or Amazon EC2 service limits. If this is
+ * not specified, the default is BEST_FIT
, which will use only the best fitting instance type, waiting for
+ * additional capacity if it's not available. This allocation strategy keeps costs lower but can limit scaling. If you
+ * are using Spot Fleets with BEST_FIT
then the Spot Fleet IAM Role must be specified.
+ * BEST_FIT_PROGRESSIVE
will select additional instance types that are large enough to meet the
+ * requirements of the jobs in the queue, with a preference for instance types with a lower cost per vCPU.
+ * SPOT_CAPACITY_OPTIMIZED
is only available for Spot Instance compute resources and will select
+ * additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference
+ * for instance types that are less likely to be interrupted. For more information, see Allocation Strategies in the
+ * AWS Batch User Guide.
The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name
- * or full Amazon Resource Name (ARN) of an instance profile. For example,
- *
- * ecsInstanceRole
- *
or
- * arn:aws:iam::
.
- * For more information, see Amazon ECS Instance
- * Role in the AWS Batch User Guide.
The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is
+ * DISABLED
).
The instances types that may be launched. You can specify instance families to launch any instance type within
- * those families (for example, c5
or p3
), or you can specify specific sizes within a
- * family (such as c5.8xlarge
). You can also choose optimal
to pick instance types (from
- * the C, M, and R instance families) on the fly that match the demand of your job queues.
The maximum number of Amazon EC2 vCPUs that an environment can reach.
*/ - instanceTypes: string[] | undefined; + maxvCpus: number | undefined; /** *The desired number of Amazon EC2 vCPUS in the compute environment.
@@ -328,42 +368,33 @@ export interface ComputeResource { desiredvCpus?: number; /** - *The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security
- * groups must be specified, either in securityGroupIds
or using a launch template referenced in
- * launchTemplate
. If security groups are specified using both securityGroupIds
and
- * launchTemplate
, the values in securityGroupIds
will be used.
The instances types that may be launched. You can specify instance families to launch any instance type within
+ * those families (for example, c5
or p3
), or you can specify specific sizes within a family
+ * (such as c5.8xlarge
). You can also choose optimal
to pick instance types (from the C, M,
+ * and R instance families) on the fly that match the demand of your job queues.
The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is
- * DISABLED
).
The Amazon Machine Image (AMI) ID used for instances launched in the compute
+ * environment. This parameter is overridden by
+ * the imageIdOverride
member of the Ec2Configuration
structure.
The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that - * instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price - * must be below 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price - * and never more than your maximum percentage. If you leave this field empty, the default value is 100% of the - * On-Demand price.
+ *The VPC subnets into which the compute resources are launched. For more information, see VPCs and Subnets in the Amazon + * VPC User Guide.
*/ - bidPercentage?: number; + subnets: string[] | undefined; /** - *The allocation strategy to use for the compute resource in case not enough instances of the best fitting
- * instance type can be allocated. This could be due to availability of the instance type in the region or Amazon EC2 service limits. If
- * this is not specified, the default is BEST_FIT
, which will use only the best fitting instance type,
- * waiting for additional capacity if it's not available. This allocation strategy keeps costs lower but can limit
- * scaling. If you are using Spot Fleets with BEST_FIT
then the Spot Fleet IAM Role must be specified.
- * BEST_FIT_PROGRESSIVE
will select additional instance types that are large enough to meet the
- * requirements of the jobs in the queue, with a preference for instance types with a lower cost per vCPU.
- * SPOT_CAPACITY_OPTIMIZED
is only available for Spot Instance compute resources and will select
- * additional instance types that are large enough to meet the requirements of the jobs in the queue, with a
- * preference for instance types that are less likely to be interrupted. For more information, see Allocation Strategies in
- * the AWS Batch User Guide.
The Amazon EC2 security groups associated with instances launched in the compute environment. One or more security
+ * groups must be specified, either in securityGroupIds
or using a launch template referenced in
+ * launchTemplate
. If security groups are specified using both securityGroupIds
and
+ * launchTemplate
, the values in securityGroupIds
will be used.
The Amazon EC2 key pair that is used for instances launched in the compute environment.
@@ -371,44 +402,68 @@ export interface ComputeResource { ec2KeyPair?: string; /** - *Key-value pair tags to be applied to resources that are launched in the compute environment. For AWS Batch, - * these take the form of "String1": "String2", where String1 is the tag key and String2 is the tag value—for - * example, { "Name": "AWS Batch Instance - C4OnDemand" - * }. These tags can not be updated or removed - * after the compute environment has been created; any changes require creating a new compute environment and - * removing the old compute environment. These tags are not seen when using the AWS Batch ListTagsForResource API - * operation.
+ *The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name
+ * or full Amazon Resource Name (ARN) of an instance profile. For example,
+ *
+ * ecsInstanceRole
+ *
or
+ * arn:aws:iam::
.
+ * For more information, see Amazon ECS Instance
+ * Role in the AWS Batch User Guide.
Key-value pair tags to be applied to resources that are launched in the compute environment. For AWS Batch, these + * take the form of "String1": "String2", where String1 is the tag key and String2 is the tag value—for example, { + * "Name": "AWS Batch Instance - C4OnDemand" }. + * These tags can not be updated or removed after the compute environment has been created; any changes require creating + * a new compute environment and removing the old compute environment. These tags are not seen when using the AWS Batch + * ListTagsForResource API operation.
*/ tags?: { [key: string]: string }; /** - *The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node - * parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it - * with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a - * single Availability Zone with high network flow potential. For more information, see Placement Groups in the Amazon EC2 User Guide - * for Linux Instances.
+ *The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel + * jobs to your compute environment, you should consider creating a cluster placement group and associate it with your + * compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single + * Availability Zone with high network flow potential. For more information, see Placement Groups in the Amazon EC2 User Guide for + * Linux Instances.
*/ placementGroup?: string; /** - *The VPC subnets into which the compute resources are launched. For more information, see VPCs and Subnets in the - * Amazon VPC User Guide.
+ *The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that + * instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must + * be below 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and + * never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand + * price.
*/ - subnets: string[] | undefined; + bidPercentage?: number; + + /** + *The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT
compute environment.
+ * This role is required if the allocation
+ * strategy set to BEST_FIT
or if the allocation strategy is not specified. For more
+ * information, see Amazon EC2 Spot Fleet
+ * Role in the AWS Batch User Guide.
The launch template to use for your compute resources. Any other compute resource parameters that you specify - * in a CreateComputeEnvironment API operation override the same parameters in the launch template. - * You must specify either the launch template ID or launch template name in the request, but not both. For more + *
The launch template to use for your compute resources. Any other compute resource parameters that you specify in + * a CreateComputeEnvironment API operation override the same parameters in the launch template. You + * must specify either the launch template ID or launch template name in the request, but not both. For more * information, see Launch Template * Support in the AWS Batch User Guide.
*/ launchTemplate?: LaunchTemplateSpecification; /** - *The maximum number of Amazon EC2 vCPUs that an environment can reach.
+ *Provides additional details used to selecting the AMI to use for instances in a compute environment.
*/ - maxvCpus: number | undefined; + ec2Configuration?: Ec2Configuration[]; } export namespace ComputeResource { @@ -428,16 +483,6 @@ export enum CEType { } export interface CreateComputeEnvironmentRequest { - /** - *The tags that you apply to the compute environment to help you categorize and organize your resources. Each - * tag consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General - * Reference.
- * - *These tags can be updated or removed using the TagResource and UntagResource API operations. These tags do not - * propagate to the underlying compute resources.
- */ - tags?: { [key: string]: string }; - /** *The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and * underscores are allowed.
@@ -450,6 +495,12 @@ export interface CreateComputeEnvironmentRequest { */ type: CEType | string | undefined; + /** + *The state of the compute environment. If the state is ENABLED
, then the compute environment accepts
+ * jobs from a queue and can scale out automatically based on queues.
Details of the compute resources managed by the compute environment. This parameter is required for managed * compute environments. For more information, see Compute Environments in the @@ -463,19 +514,23 @@ export interface CreateComputeEnvironmentRequest { *
If your specified role has a path other than /
, then you must either specify the full role ARN
* (this is recommended) or prefix the role name with the path.
Depending on how you created your AWS Batch service role, its ARN may contain the service-role
- * path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the
- * service-role
path prefix. Because of this, we recommend that you specify the full ARN of your
- * service role when you create compute environments.
Depending on how you created your AWS Batch service role, its ARN may contain the service-role
path
+ * prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the
+ * service-role
path prefix. Because of this, we recommend that you specify the full ARN of your service
+ * role when you create compute environments.
The state of the compute environment. If the state is ENABLED
, then the compute environment
- * accepts jobs from a queue and can scale out automatically based on queues.
The tags that you apply to the compute environment to help you categorize and organize your resources. Each tag + * consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General + * Reference.
+ * + *These tags can be updated or removed using the TagResource and UntagResource API operations. These tags do not + * propagate to the underlying compute resources.
*/ - state?: CEState | string; + tags?: { [key: string]: string }; } export namespace CreateComputeEnvironmentRequest { @@ -509,14 +564,14 @@ export namespace CreateComputeEnvironmentResponse { */ export interface ComputeEnvironmentOrder { /** - *The Amazon Resource Name (ARN) of the compute environment.
+ *The order of the compute environment.
*/ - computeEnvironment: string | undefined; + order: number | undefined; /** - *The order of the compute environment.
+ *The Amazon Resource Name (ARN) of the compute environment.
*/ - order: number | undefined; + computeEnvironment: string | undefined; } export namespace ComputeEnvironmentOrder { @@ -531,6 +586,11 @@ export enum JQState { } export interface CreateJobQueueRequest { + /** + *The name of the job queue.
+ */ + jobQueueName: string | undefined; + /** *The state of the job queue. If the job queue state is ENABLED
, it is able to accept jobs. If the
* job queue state is DISABLED
, new jobs cannot be added to the queue, but jobs already in the queue can
@@ -538,33 +598,28 @@ export interface CreateJobQueueRequest {
*/
state?: JQState | string;
- /**
- *
The tags that you apply to the job queue to help you categorize and organize your resources. Each tag consists - * of a key and an optional value. For more information, see Tagging AWS Resources in AWS General - * Reference.
- */ - tags?: { [key: string]: string }; - /** *The priority of the job queue. Job queues with a higher priority (or a higher integer value for the
- * priority
parameter) are evaluated first when associated with the same compute environment. Priority
- * is determined in descending order, for example, a job queue with a priority value of 10
is given
- * scheduling preference over a job queue with a priority value of 1
.
priority
parameter) are evaluated first when associated with the same compute environment. Priority is
+ * determined in descending order, for example, a job queue with a priority value of 10
is given scheduling
+ * preference over a job queue with a priority value of 1
.
*/
priority: number | undefined;
/**
- * The name of the job queue.
+ *The set of compute environments mapped to a job queue and their order relative to each other. The job scheduler
+ * uses this parameter to determine which compute environment should execute a given job. Compute environments must be
+ * in the VALID
state before you can associate them with a job queue. You can associate up to three compute
+ * environments with a job queue.
The set of compute environments mapped to a job queue and their order relative to each other. The job
- * scheduler uses this parameter to determine which compute environment should execute a given job. Compute
- * environments must be in the VALID
state before you can associate them with a job queue. You can
- * associate up to three compute environments with a job queue.
The tags that you apply to the job queue to help you categorize and organize your resources. Each tag consists + * of a key and an optional value. For more information, see Tagging AWS Resources in AWS General + * Reference.
*/ - computeEnvironmentOrder: ComputeEnvironmentOrder[] | undefined; + tags?: { [key: string]: string }; } export namespace CreateJobQueueRequest { @@ -635,8 +690,7 @@ export namespace DeleteJobQueueResponse { export interface DeregisterJobDefinitionRequest { /** - *The name and revision (name:revision
) or full Amazon Resource Name (ARN) of the job definition to
- * deregister.
The name and revision (name:revision
) or full Amazon Resource Name (ARN) of the job definition to deregister.
A list of up to 100 compute environment names or full Amazon Resource Name (ARN) entries.
+ */ + computeEnvironments?: string[]; + + /** + *The maximum number of cluster results returned by DescribeComputeEnvironments
in paginated output.
+ * When this parameter is used, DescribeComputeEnvironments
only returns maxResults
results in
+ * a single page along with a nextToken
response element. The remaining results of the initial request can
+ * be seen by sending another DescribeComputeEnvironments
request with the returned nextToken
+ * value. This value can be between 1 and 100. If this parameter is not used, then
+ * DescribeComputeEnvironments
returns up to 100 results and a nextToken
+ * value if applicable.
The nextToken
value returned from a previous paginated DescribeComputeEnvironments
* request where maxResults
was used and the results exceeded the value of that parameter. Pagination
@@ -667,22 +737,6 @@ export interface DescribeComputeEnvironmentsRequest {
*
The maximum number of cluster results returned by DescribeComputeEnvironments
in paginated
- * output. When this parameter is used, DescribeComputeEnvironments
only returns maxResults
- * results in a single page along with a nextToken
response element. The remaining results of the
- * initial request can be seen by sending another DescribeComputeEnvironments
request with the returned
- * nextToken
value. This value can be between 1 and 100. If this
- * parameter is not used, then DescribeComputeEnvironments
returns up to 100 results
- * and a nextToken
value if applicable.
A list of up to 100 compute environment names or full Amazon Resource Name (ARN) entries.
- */ - computeEnvironments?: string[]; } export namespace DescribeComputeEnvironmentsRequest { @@ -705,40 +759,15 @@ export enum CEStatus { */ export interface ComputeEnvironmentDetail { /** - *A short, human-readable string to provide additional details about the current status of the compute - * environment.
- */ - statusReason?: string; - - /** - *The service role associated with the compute environment that allows AWS Batch to make calls to AWS API - * operations on your behalf.
+ *The name of the compute environment.
*/ - serviceRole?: string; + computeEnvironmentName: string | undefined; /** *The Amazon Resource Name (ARN) of the compute environment.
*/ computeEnvironmentArn: string | undefined; - /** - *The state of the compute environment. The valid values are ENABLED
or
- * DISABLED
.
If the state is ENABLED
, then the AWS Batch scheduler can attempt to place jobs from an associated
- * job queue on the compute resources within the environment. If the compute environment is managed, then it can
- * scale its instances out or in automatically, based on the job queue demand.
If the state is DISABLED
, then the AWS Batch scheduler does not attempt to place jobs within the
- * environment. Jobs in a STARTING
or RUNNING
state continue to progress normally. Managed
- * compute environments in the DISABLED
state do not scale out. However, they scale in to
- * minvCpus
value after instances become idle.
The compute resources defined for the compute environment.
- */ - computeResources?: ComputeResource; - /** *The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster used by the compute environment.
*/ @@ -749,21 +778,44 @@ export interface ComputeEnvironmentDetail { */ tags?: { [key: string]: string }; - /** - *The current status of the compute environment (for example, CREATING
or
- * VALID
).
The type of the compute environment.
*/ type?: CEType | string; /** - *The name of the compute environment.
- */ - computeEnvironmentName: string | undefined; + *The state of the compute environment. The valid values are ENABLED
or DISABLED
.
If the state is ENABLED
, then the AWS Batch scheduler can attempt to place jobs from an associated
+ * job queue on the compute resources within the environment. If the compute environment is managed, then it can scale
+ * its instances out or in automatically, based on the job queue demand.
If the state is DISABLED
, then the AWS Batch scheduler does not attempt to place jobs within the
+ * environment. Jobs in a STARTING
or RUNNING
state continue to progress normally. Managed
+ * compute environments in the DISABLED
state do not scale out. However, they scale in to
+ * minvCpus
value after instances become idle.
The current status of the compute environment (for example, CREATING
or VALID
).
A short, human-readable string to provide additional details about the current status of the compute + * environment.
+ */ + statusReason?: string; + + /** + *The compute resources defined for the compute environment.
+ */ + computeResources?: ComputeResource; + + /** + *The service role associated with the compute environment that allows AWS Batch to make calls to AWS API + * operations on your behalf.
+ */ + serviceRole?: string; } export namespace ComputeEnvironmentDetail { @@ -774,17 +826,17 @@ export namespace ComputeEnvironmentDetail { export interface DescribeComputeEnvironmentsResponse { /** - *The nextToken
value to include in a future DescribeComputeEnvironments
request. When
- * the results of a DescribeJobDefinitions
request exceed maxResults
, this value can be
- * used to retrieve the next page of results. This value is null
when there are no more results to
- * return.
The list of compute environments.
*/ - nextToken?: string; + computeEnvironments?: ComputeEnvironmentDetail[]; /** - *The list of compute environments.
+ *The nextToken
value to include in a future DescribeComputeEnvironments
request. When
+ * the results of a DescribeJobDefinitions
request exceed maxResults
, this value can be used
+ * to retrieve the next page of results. This value is null
when there are no more results to
+ * return.
The nextToken
value returned from a previous paginated DescribeJobDefinitions
- * request where maxResults
was used and the results exceeded the value of that parameter. Pagination
- * continues from the end of the previous results that returned the nextToken
value. This value is
- * null
when there are no more results to return.
This token should be treated as an opaque identifier that is only used to - * retrieve the next items in a list and not for other programmatic purposes.
- *A list of up to 100 job definition names or full Amazon Resource Name (ARN) entries.
*/ - nextToken?: string; + jobDefinitions?: string[]; /** *The maximum number of results returned by DescribeJobDefinitions
in paginated output. When this
- * parameter is used, DescribeJobDefinitions
only returns maxResults
results in a single
- * page along with a nextToken
response element. The remaining results of the initial request can be
- * seen by sending another DescribeJobDefinitions
request with the returned nextToken
- * value. This value can be between 1 and 100. If this parameter is not used, then
- * DescribeJobDefinitions
returns up to 100 results and a nextToken
- * value if applicable.
DescribeJobDefinitions
only returns maxResults
results in a single page
+ * along with a nextToken
response element. The remaining results of the initial request can be seen by
+ * sending another DescribeJobDefinitions
request with the returned nextToken
value. This
+ * value can be between 1 and 100. If this parameter is not used, then
+ * DescribeJobDefinitions
returns up to 100 results and a nextToken
value
+ * if applicable.
*/
maxResults?: number;
@@ -828,9 +873,16 @@ export interface DescribeJobDefinitionsRequest {
status?: string;
/**
- * A list of up to 100 job definition names or full Amazon Resource Name (ARN) entries.
+ *The nextToken
value returned from a previous paginated DescribeJobDefinitions
request
+ * where maxResults
was used and the results exceeded the value of that parameter. Pagination continues
+ * from the end of the previous results that returned the nextToken
value. This value is null
+ * when there are no more results to return.
This token should be treated as an opaque identifier that is only used to + * retrieve the next items in a list and not for other programmatic purposes.
+ *The name of the key-value pair. For environment variables, this is the name of the environment - * variable.
+ *The name of the key-value pair. For environment variables, this is the name of the environment variable.
*/ name?: string; @@ -873,21 +924,21 @@ export enum DeviceCgroupPermission { */ export interface Device { /** - *The explicit permissions to provide to the container for the device. By default, the container has permissions
- * for read
, write
, and mknod
for the device.
The path for the device on the host container instance.
*/ - permissions?: (DeviceCgroupPermission | string)[]; + hostPath: string | undefined; /** - *The path inside the container at which to expose the host device. By default the hostPath
value
- * is used.
The path inside the container at which to expose the host device. By default the hostPath
value is
+ * used.
The path for the device on the host container instance.
+ *The explicit permissions to provide to the container for the device. By default, the container has permissions
+ * for read
, write
, and mknod
for the device.
The container path, mount options, and size of the tmpfs mount.
*/ export interface Tmpfs { + /** + *The absolute file path in the container where the tmpfs volume is to be mounted.
+ */ + containerPath: string | undefined; + + /** + *The size (in MiB) of the tmpfs volume.
+ */ + size: number | undefined; + /** *The list of tmpfs volume mount options.
*Valid values: "defaults
" | "ro
" | "rw
" | "suid
" |
@@ -912,16 +973,6 @@ export interface Tmpfs {
* "gid
" | "nr_inodes
" | "nr_blocks
" | "mpol
"
The size (in MiB) of the tmpfs volume.
- */ - size: number | undefined; - - /** - *The absolute file path in the container where the tmpfs volume is to be mounted.
- */ - containerPath: string | undefined; } export namespace Tmpfs { @@ -935,57 +986,58 @@ export namespace Tmpfs { */ export interface LinuxParameters { /** - *The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the
- * --tmpfs
option to docker run.
Any host devices to expose to the container. This parameter maps to Devices
in the
+ * Create a container section of the Docker Remote API and the --device
option to docker run.
The value for the size (in MiB) of the /dev/shm
volume. This parameter maps to the
- * --shm-size
option to docker run.
If true, run an init
process inside the container that forwards signals and reaps processes. This
+ * parameter maps to the --init
option to docker run.
+ * This parameter requires version 1.25 of the Docker Remote API or greater on your
+ * container instance. To check the Docker Remote API version on your container instance, log into your
+ * container instance and run the following command: sudo docker version | grep "Server API version"
+ *
Any host devices to expose to the container. This parameter maps to Devices
in the
- * Create a container section of the Docker Remote API and the --device
option to docker run.
The value for the size (in MiB) of the /dev/shm
volume. This parameter maps to the
+ * --shm-size
option to docker run.
This allows you to tune a container's memory swappiness behavior. A swappiness
value of
- * 0
will cause swapping to not happen unless absolutely necessary. A swappiness
value of
- * 100
will cause pages to be swapped very aggressively. Accepted values are whole numbers between
- * 0
and 100
. If the swappiness
parameter is not specified, a default value
- * of 60
is used. If a value is not specified for maxSwap
then this parameter is ignored.
- * This parameter maps to the --memory-swappiness
option to docker
- * run.
The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the
+ * --tmpfs
option to docker run.
The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the
- * --memory-swap
option to docker run where the value
- * would be the sum of the container memory plus the maxSwap
value. For more information,
+ * --memory-swap
option to docker run where the value would be
+ * the sum of the container memory plus the maxSwap
+ * value. For more information,
* see
* --memory-swap
details in the Docker
* documentation.
If a maxSwap
value of 0
is specified, the container will not use swap. Accepted
- * values are 0
or any positive integer. If the maxSwap
parameter is omitted, the container
- * will use the swap configuration for the container instance it is running on. A maxSwap
value must be
- * set for the swappiness
parameter to be used.
If a maxSwap
value of 0
is specified, the container will not use swap. Accepted values
+ * are 0
or any positive integer. If the maxSwap
parameter is omitted, the container will use
+ * the swap configuration for the container instance it is running on. A maxSwap
value must be set for the
+ * swappiness
parameter to be used.
If true, run an init
process inside the container that forwards signals and reaps processes. This
- * parameter maps to the --init
option to docker run.
- * This parameter requires version 1.25 of the Docker Remote API or greater on your
- * container instance. To check the Docker Remote API version on your container instance, log into your
- * container instance and run the following command: sudo docker version | grep "Server API version"
- *
This allows you to tune a container's memory swappiness behavior. A swappiness
value of
+ * 0
will cause swapping to not happen unless absolutely necessary. A swappiness
value of
+ * 100
will cause pages to be swapped very aggressively. Accepted values are whole numbers between
+ * 0
and 100
. If the swappiness
parameter is not specified, a default value of
+ * 60
is used. If a value is not specified for maxSwap
then this parameter is ignored. This
+ * parameter maps to the --memory-swappiness
option to docker
+ * run.
To inject sensitive data into your containers as environment variables, use the secrets
- * container definition parameter.
To inject sensitive data into your containers as environment variables, use the secrets
container
+ * definition parameter.
To reference sensitive information in the log configuration of a container, use the
- * secretOptions
container definition parameter.
To reference sensitive information in the log configuration of a container, use the secretOptions
+ * container definition parameter.
For more information, see Specifying Sensitive Data in the * Amazon Elastic Container Service Developer Guide.
*/ export interface Secret { + /** + *The name of the secret.
+ */ + name: string | undefined; + /** *The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the * full ARN of the parameter in the AWS Systems Manager Parameter Store.
*If the AWS Systems Manager Parameter Store parameter exists in the same Region as the task you are launching, then you can - * use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full - * ARN must be specified.
+ * use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN + * must be specified. *The name of the secret.
- */ - name: string | undefined; } export namespace Secret { @@ -1048,12 +1100,6 @@ export namespace Secret { *Log configuration options to send to a custom log driver for the container.
*/ export interface LogConfiguration { - /** - *The secrets to pass to the log configuration. For more information, see Specifying Sensitive Data in the - * AWS Batch User Guide.
- */ - secretOptions?: Secret[]; - /** *The log driver to use for the container. The valid values listed for this parameter are log drivers that the * Amazon ECS container agent can communicate with by default.
@@ -1102,9 +1148,9 @@ export interface LogConfiguration { * *If you have a custom driver that is not listed earlier that you would like to work with the Amazon ECS container - * agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that - * driver. We encourage you to submit pull requests for changes that you would like to have included. However, - * Amazon Web Services does not currently support running modified copies of this software.
+ * agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests + * for changes that you would like to have included. However, Amazon Web Services does not currently support running modified + * copies of this software. *This parameter requires version 1.18 of the Docker Remote API or greater on your * container instance. To check the Docker Remote API version on your container instance, log into your @@ -1120,6 +1166,12 @@ export interface LogConfiguration { *
*/ options?: { [key: string]: string }; + + /** + *The secrets to pass to the log configuration. For more information, see Specifying Sensitive Data in the + * AWS Batch User Guide.
+ */ + secretOptions?: Secret[]; } export namespace LogConfiguration { @@ -1166,17 +1218,17 @@ export enum ResourceType { *GPU
.
*/
export interface ResourceRequirement {
- /**
- * The type of resource to assign to a container. Currently, the only supported resource type is
- * GPU
.
The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a * job should not exceed the number of available GPUs on the compute resource that the job is launched on.
*/ value: string | undefined; + + /** + *The type of resource to assign to a container. Currently, the only supported resource type is
+ * GPU
.
The soft limit for the ulimit
type.
The hard limit for the ulimit
type.
The type
of the ulimit
.
The hard limit for the ulimit
type.
The soft limit for the ulimit
type.
Determine whether your data volume persists on the host container instance and where it is stored. If this - * parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not - * guaranteed to persist after the containers associated with it stop running.
+ * parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed + * to persist after the containers associated with it stop running. */ export interface Host { /** - *The path on the host container instance that is presented to the container. If this parameter is empty, then - * the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data - * volume persists at the specified location on the host container instance until you delete it manually. If the - * source path location does not exist on the host container instance, the Docker daemon creates it. If the location - * does exist, the contents of the source path folder are exported.
+ *The path on the host container instance that is presented to the container. If this parameter is empty, then the + * Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume + * persists at the specified location on the host container instance until you delete it manually. If the source path + * location does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the + * contents of the source path folder are exported.
*/ sourcePath?: string; } @@ -1237,20 +1289,20 @@ export namespace Host { *A data volume used in a job's container properties.
*/ export interface Volume { + /** + *The contents of the host
parameter determine whether your data volume persists on the host
+ * container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path
+ * for your data volume. However, the data is not guaranteed to persist after the containers associated with it stop
+ * running.
The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are
* allowed. This name is referenced in the sourceVolume
* parameter of container definition mountPoints
.
The contents of the host
parameter determine whether your data volume persists on the host
- * container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host
- * path for your data volume. However, the data is not guaranteed to persist after the containers associated with it
- * stop running.
A list of ulimits
to set in the container. This parameter maps to Ulimits
in the
- * Create a container section of the Docker Remote API and the --ulimit
option to docker run.
The type and amount of a resource to assign to a container. Currently, the only supported resource is
- * GPU
.
When this parameter is true, the container is given elevated privileges on the host container instance
- * (similar to the root
user). This parameter maps to Privileged
in the
- * Create a container section of the Docker Remote API and the --privileged
option to
- * docker run.
When this parameter is true, the container is given read-only access to its root file system. This parameter
- * maps to ReadonlyRootfs
in the Create a container section of the Docker Remote API and
- * the --read-only
option to docker run
.
The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For more information, see AWS Batch execution IAM role.
- */ - executionRoleArn?: string; - - /** - *The mount points for data volumes in your container. This parameter maps to Volumes
in the
- * Create a container section of the Docker Remote API and the --volume
option to docker run.
Linux-specific modifications that are applied to the container, such as details for device mappings.
- */ - linuxParameters?: LinuxParameters; - - /** - *The user name to use inside the container. This parameter maps to User
in the
- * Create a container section of the Docker Remote API and the --user
option to docker run.
The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory
- * specified here, the container is killed. This parameter maps to Memory
in the
- * Create a container section of the Docker Remote API and the --memory
option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified
- * in several places for multi-node parallel (MNP) jobs; it must be specified for each node at least
- * once.
If you are trying to maximize your resource utilization by providing your jobs as much memory as possible - * for a particular instance type, see Memory - * Management in the AWS Batch User Guide.
- *The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
* Hub registry are available by default. Other repositories are specified with
*
* repository-url/image:tag
*
.
- * Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons,
- * periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the Create a container section
- * of the Docker Remote API and the IMAGE
parameter of docker
+ * Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons,
+ * periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the Create a container section of
+ * the Docker Remote API and the IMAGE
parameter of docker
* run.
The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
+ *The number of vCPUs reserved for the container. This parameter maps to CpuShares
in the
+ * Create a container section of the Docker Remote API and the --cpu-shares
option to
+ * docker run. Each vCPU is equivalent to 1,024 CPU shares. You must
+ * specify at least one vCPU. This is required
+ * but can be specified in several places for multi-node parallel (MNP) jobs; it must be specified for each node at
+ * least once.
A list of data volumes used in a job.
+ *The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory
+ * specified here, the container is killed. This parameter maps to Memory
in the
+ * Create a container section of the Docker Remote API and the --memory
option to docker run. You must specify at least 4 MiB of memory for a
+ * job. This is required but can be specified in
+ * several places for multi-node parallel (MNP) jobs; it must be specified for each node at least
+ * once.
If you are trying to maximize your resource utilization by providing your jobs as much memory as possible for a + * particular instance type, see Memory + * Management in the AWS Batch User Guide.
+ *The instance type to use for a multi-node parallel job. Currently all node groups in a multi-node parallel job - * must use the same instance type. This parameter is not valid for single-node container jobs.
+ *The command that is passed to the container. This parameter maps to Cmd
in the
+ * Create a container section of the Docker Remote API and the COMMAND
parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.
The log configuration specification for the container.
- *This parameter maps to LogConfig
in the Create a container section of the
- * Docker Remote API and the --log-driver
option to docker
- * run. By default, containers use the same logging driver that the Docker daemon uses. However the
- * container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter
- * in the container definition. To use a different logging driver for a container, the log system must be configured
- * properly on the container instance (or on a different log server for remote logging options). For more information
- * on the options for different supported log drivers, see Configure logging drivers in the Docker
- * documentation.
AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).
- *This parameter requires version 1.18 of the Docker Remote API or greater on your
- * container instance. To check the Docker Remote API version on your container instance, log into your
- * container instance and run the following command: sudo docker version | grep "Server API version"
- *
The Amazon ECS container agent running on a container instance must register the logging drivers available on
- * that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed
- * on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the
- * Amazon Elastic Container Service Developer Guide.
The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
*/ - logConfiguration?: LogConfiguration; + jobRoleArn?: string; /** - *The command that is passed to the container. This parameter maps to Cmd
in the
- * Create a container section of the Docker Remote API and the COMMAND
parameter to
- * docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.
The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For more information, see AWS Batch execution IAM role.
*/ - command?: string[]; + executionRoleArn?: string; /** - *The number of vCPUs reserved for the container. This parameter maps to CpuShares
in the
- * Create a container section of the Docker Remote API and the --cpu-shares
option to
- * docker run. Each vCPU is equivalent to 1,024 CPU shares. You must
- * specify at least one vCPU. This is required
- * but can be specified in several places for multi-node parallel (MNP) jobs; it must be specified for each node at
- * least once.
A list of data volumes used in a job.
*/ - vcpus?: number; + volumes?: Volume[]; /** *The environment variables to pass to a container. This parameter maps to Env
in the
@@ -1431,6 +1407,82 @@ export interface ContainerProperties {
*/
environment?: KeyValuePair[];
+ /**
+ *
The mount points for data volumes in your container. This parameter maps to Volumes
in the
+ * Create a container section of the Docker Remote API and the --volume
option to docker run.
When this parameter is true, the container is given read-only access to its root file system. This parameter
+ * maps to ReadonlyRootfs
in the Create a container section of the Docker Remote API and
+ * the --read-only
option to docker run
.
When this parameter is true, the container is given elevated privileges on the host container instance (similar
+ * to the root
user). This parameter maps to Privileged
in the
+ * Create a container section of the Docker Remote API and the --privileged
option to
+ * docker run.
A list of ulimits
to set in the container. This parameter maps to Ulimits
in the
+ * Create a container section of the Docker Remote API and the --ulimit
option to docker run.
The user name to use inside the container. This parameter maps to User
in the
+ * Create a container section of the Docker Remote API and the --user
option to docker run.
The instance type to use for a multi-node parallel job. Currently all node groups in a multi-node parallel job + * must use the same instance type. This parameter is not valid for single-node container jobs.
+ */ + instanceType?: string; + + /** + *The type and amount of a resource to assign to a container. Currently, the only supported resource is
+ * GPU
.
Linux-specific modifications that are applied to the container, such as details for device mappings.
+ */ + linuxParameters?: LinuxParameters; + + /** + *The log configuration specification for the container.
+ *This parameter maps to LogConfig
in the Create a container section of the
+ * Docker Remote API and the --log-driver
option to docker run.
+ * By default, containers use the same logging driver that the Docker daemon uses. However the container may use a
+ * different logging driver than the Docker daemon by specifying a log driver with this parameter in the container
+ * definition. To use a different logging driver for a container, the log system must be configured properly on the
+ * container instance (or on a different log server for remote logging options). For more information on the options for
+ * different supported log drivers, see Configure
+ * logging drivers in the Docker documentation.
AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).
+ *This parameter requires version 1.18 of the Docker Remote API or greater on your
+ * container instance. To check the Docker Remote API version on your container instance, log into your
+ * container instance and run the following command: sudo docker version | grep "Server API version"
+ *
The Amazon ECS container agent running on a container instance must register the logging drivers available on that
+ * instance with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that
+ * instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the
+ * Amazon Elastic Container Service Developer Guide.
The secrets for the container. For more information, see Specifying Sensitive Data in the * Amazon Elastic Container Service Developer Guide.
@@ -1449,19 +1501,18 @@ export namespace ContainerProperties { */ export interface NodeRangeProperty { /** - *The container details for the node range.
+ *The range of nodes, using node index values. A range of 0:3
indicates nodes with index values of
+ * 0
through 3
. If the starting range value is omitted (:n
), then 0
+ * is used to start the range. If the ending range value is omitted (n:
), then the highest possible node
+ * index is used to end the range. Your accumulative node ranges must account for all nodes (0:n). You may nest node
+ * ranges, for example 0:10 and 4:5, in which case the 4:5 range properties override the 0:10 properties.
The range of nodes, using node index values. A range of 0:3
indicates nodes with index values of
- * 0
through 3
. If the starting range value is omitted (:n
), then
- * 0
is used to start the range. If the ending range value is omitted (n:
), then the
- * highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes
- * (0:n). You may nest node ranges, for example 0:10 and 4:5, in which case the 4:5 range properties override the
- * 0:10 properties.
The container details for the node range.
*/ - targetNodes: string | undefined; + container?: ContainerProperties; } export namespace NodeRangeProperty { @@ -1474,11 +1525,6 @@ export namespace NodeRangeProperty { *An object representing the node properties of a multi-node parallel job.
*/ export interface NodeProperties { - /** - *A list of node ranges and their properties associated with a multi-node parallel job.
- */ - nodeRangeProperties: NodeRangeProperty[] | undefined; - /** *The number of nodes associated with a multi-node parallel job.
*/ @@ -1489,6 +1535,11 @@ export interface NodeProperties { * than the number of nodes. */ mainNode: number | undefined; + + /** + *A list of node ranges and their properties associated with a multi-node parallel job.
+ */ + nodeRangeProperties: NodeRangeProperty[] | undefined; } export namespace NodeProperties { @@ -1508,17 +1559,11 @@ export enum RetryAction { */ export interface EvaluateOnExit { /** - *Contains a glob pattern to match against the decimal representation of the ExitCode
returned for a
- * job. The patten can be up to 512 characters long, can contain only numbers, and can optionally end with an asterisk
- * (*) so that only the start of the string needs to be an exact match.
Specifies the action to take if all of the specified conditions (onStatusReason
,
- * onReason
, and onExitCode
) are met.
Contains a glob pattern to match against the StatusReason
returned for a job. The patten can be up
+ * to 512 characters long, can contain letters, numbers, periods (.), colons (:), and whitespace (spaces, tabs), and can
+ * optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.
Contains a glob pattern to match against the Reason
returned for a job. The patten can be up to 512
@@ -1528,11 +1573,17 @@ export interface EvaluateOnExit {
onReason?: string;
/**
- *
Contains a glob pattern to match against the StatusReason
returned for a job. The patten can be up
- * to 512 characters long, can contain letters, numbers, periods (.), colons (:), and whitespace (spaces, tabs). and can
- * optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.
Contains a glob pattern to match against the decimal representation of the ExitCode
returned for a
+ * job. The patten can be up to 512 characters long, can contain only numbers, and can optionally end with an asterisk
+ * (*) so that only the start of the string needs to be an exact match.
Specifies the action to take if all of the specified conditions (onStatusReason
,
+ * onReason
, and onExitCode
) are met.
The number of times to move a job to the RUNNABLE
status. You may specify between 1 and 10
- * attempts. If the value of attempts
is greater than one, the job is retried on failure the same number
- * of attempts as the value.
attempts
is greater than one, the job is retried on failure the same number of
+ * attempts as the value.
*/
attempts?: number;
@@ -1588,14 +1639,14 @@ export namespace JobTimeout {
*/
export interface JobDefinition {
/**
- * The retry strategy to use for failed jobs that are submitted with this job definition.
+ *The name of the job definition.
*/ - retryStrategy?: RetryStrategy; + jobDefinitionName: string | undefined; /** - *The type of job definition.
+ *The Amazon Resource Name (ARN) for the job definition.
*/ - type: string | undefined; + jobDefinitionArn: string | undefined; /** *The revision of the job definition.
@@ -1603,48 +1654,48 @@ export interface JobDefinition { revision: number | undefined; /** - *The tags applied to the job definition.
+ *The status of the job definition.
*/ - tags?: { [key: string]: string }; + status?: string; /** - *The status of the job definition.
+ *The type of job definition.
*/ - status?: string; + type: string | undefined; /** - *An object with various properties specific to multi-node parallel jobs.
+ *Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are
+ * specified as a key-value pair mapping. Parameters in a SubmitJob
request override any corresponding
+ * parameter defaults from the job definition. For more information about specifying parameters, see Job Definition Parameters
+ * in the AWS Batch User Guide.
The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout - * duration after which AWS Batch terminates your jobs if they have not finished.
+ *The retry strategy to use for failed jobs that are submitted with this job definition.
*/ - timeout?: JobTimeout; + retryStrategy?: RetryStrategy; /** - *Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are
- * specified as a key-value pair mapping. Parameters in a SubmitJob
request override any corresponding
- * parameter defaults from the job definition. For more information about specifying parameters, see Job Definition
- * Parameters in the AWS Batch User Guide.
An object with various properties specific to container-based jobs.
*/ - parameters?: { [key: string]: string }; + containerProperties?: ContainerProperties; /** - *The Amazon Resource Name (ARN) for the job definition.
+ *The timeout configuration for jobs that are submitted with this job definition. You can specify a timeout + * duration after which AWS Batch terminates your jobs if they have not finished.
*/ - jobDefinitionArn: string | undefined; + timeout?: JobTimeout; /** - *The name of the job definition.
+ *An object with various properties specific to multi-node parallel jobs.
*/ - jobDefinitionName: string | undefined; + nodeProperties?: NodeProperties; /** - *An object with various properties specific to container-based jobs.
+ *The tags applied to the job definition.
*/ - containerProperties?: ContainerProperties; + tags?: { [key: string]: string }; } export namespace JobDefinition { @@ -1655,17 +1706,16 @@ export namespace JobDefinition { export interface DescribeJobDefinitionsResponse { /** - *The nextToken
value to include in a future DescribeJobDefinitions
request. When the
- * results of a DescribeJobDefinitions
request exceed maxResults
, this value can be used to
- * retrieve the next page of results. This value is null
when there are no more results to
- * return.
The list of job definitions.
*/ - nextToken?: string; + jobDefinitions?: JobDefinition[]; /** - *The list of job definitions.
+ *The nextToken
value to include in a future DescribeJobDefinitions
request. When the
+ * results of a DescribeJobDefinitions
request exceed maxResults
, this value can be used to
+ * retrieve the next page of results. This value is null
when there are no more results to return.
A list of up to 100 queue names or full queue Amazon Resource Name (ARN) entries.
+ */ + jobQueues?: string[]; + /** *The maximum number of results returned by DescribeJobQueues
in paginated output. When this
- * parameter is used, DescribeJobQueues
only returns maxResults
results in a single page
- * along with a nextToken
response element. The remaining results of the initial request can be seen by
- * sending another DescribeJobQueues
request with the returned nextToken
value. This value
- * can be between 1 and 100. If this parameter is not used, then
- * DescribeJobQueues
returns up to 100 results and a nextToken
value if
- * applicable.
DescribeJobQueues
only returns maxResults
results in a single page along
+ * with a nextToken
response element. The remaining results of the initial request can be seen by sending
+ * another DescribeJobQueues
request with the returned nextToken
value. This value can be
+ * between 1 and 100. If this parameter is not used, then DescribeJobQueues
+ * returns up to 100 results and a nextToken
value if applicable.
*/
maxResults?: number;
/**
- * The nextToken
value returned from a previous paginated DescribeJobQueues
request
- * where maxResults
was used and the results exceeded the value of that parameter. Pagination continues
- * from the end of the previous results that returned the nextToken
value. This value is
- * null
when there are no more results to return.
The nextToken
value returned from a previous paginated DescribeJobQueues
request where
+ * maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the
+ * end of the previous results that returned the nextToken
value. This value is null
when
+ * there are no more results to return.
This token should be treated as an opaque identifier that is only used to * retrieve the next items in a list and not for other programmatic purposes.
*A list of up to 100 queue names or full queue Amazon Resource Name (ARN) entries.
- */ - jobQueues?: string[]; } export namespace DescribeJobQueuesRequest { @@ -1724,26 +1773,15 @@ export enum JQStatus { */ export interface JobQueueDetail { /** - *The compute environments that are attached to the job queue and the order in which job placement is preferred. - * Compute environments are selected for job placement in ascending order.
- */ - computeEnvironmentOrder: ComputeEnvironmentOrder[] | undefined; - - /** - *A short, human-readable string to provide additional details about the current status of the job queue.
+ *The name of the job queue.
*/ - statusReason?: string; + jobQueueName: string | undefined; /** *The Amazon Resource Name (ARN) of the job queue.
*/ jobQueueArn: string | undefined; - /** - *The name of the job queue.
- */ - jobQueueName: string | undefined; - /** *Describes the ability of the queue to accept new jobs. If the job queue state is ENABLED
, it is
* able to accept jobs. If the job queue state is DISABLED
, new jobs cannot be added to the queue, but jobs
@@ -1751,20 +1789,31 @@ export interface JobQueueDetail {
*/
state: JQState | string | undefined;
+ /**
+ *
The status of the job queue (for example, CREATING
or VALID
).
A short, human-readable string to provide additional details about the current status of the job queue.
+ */ + statusReason?: string; + /** *The priority of the job queue.
*/ priority: number | undefined; /** - *The tags applied to the job queue.
+ *The compute environments that are attached to the job queue and the order in which job placement is preferred. + * Compute environments are selected for job placement in ascending order.
*/ - tags?: { [key: string]: string }; + computeEnvironmentOrder: ComputeEnvironmentOrder[] | undefined; /** - *The status of the job queue (for example, CREATING
or VALID
).
The tags applied to the job queue.
*/ - status?: JQStatus | string; + tags?: { [key: string]: string }; } export namespace JobQueueDetail { @@ -1780,10 +1829,9 @@ export interface DescribeJobQueuesResponse { jobQueues?: JobQueueDetail[]; /** - *The nextToken
value to include in a future DescribeJobQueues
request. When the
- * results of a DescribeJobQueues
request exceed maxResults
, this value can be used to
- * retrieve the next page of results. This value is null
when there are no more results to
- * return.
The nextToken
value to include in a future DescribeJobQueues
request. When the results
+ * of a DescribeJobQueues
request exceed maxResults
, this value can be used to retrieve the
+ * next page of results. This value is null
when there are no more results to return.
The type and amount of a resource to assign to a container. Currently, the only supported resource is
- * GPU
.
The image used to start the container.
*/ - resourceRequirements?: ResourceRequirement[]; + image?: string; /** - *When this parameter is true, the container is given elevated privileges on the host container instance
- * (similar to the root
user).
The number of VCPUs allocated for the job. This is a required parameter.
*/ - privileged?: boolean; + vcpus?: number; /** - *The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For more information, see AWS Batch execution IAM role.
+ *The number of MiB of memory reserved for the job. This is a required parameter.
*/ - executionRoleArn?: string; + memory?: number; /** - *The number of VCPUs allocated for the job. This is a required parameter.
+ *The command that is passed to the container.
*/ - vcpus?: number; + command?: string[]; /** - *When this parameter is true, the container is given read-only access to its root file system.
+ *The Amazon Resource Name (ARN) associated with the job upon execution.
*/ - readonlyRootFilesystem?: boolean; + jobRoleArn?: string; /** - *A list of ulimit
values to set in the container.
The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For more information, see AWS Batch execution IAM role.
*/ - ulimits?: Ulimit[]; + executionRoleArn?: string; /** - *The log configuration specification for the container.
- *This parameter maps to LogConfig
in the Create a container section of the
- * Docker Remote API and the --log-driver
option to docker
- * run. By default, containers use the same logging driver that the Docker daemon uses. However the
- * container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter
- * in the container definition. To use a different logging driver for a container, the log system must be configured
- * properly on the container instance (or on a different log server for remote logging options). For more information
- * on the options for different supported log drivers, see Configure logging drivers in the Docker
- * documentation.
AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the - * Amazon ECS container agent.
- *This parameter requires version 1.18 of the Docker Remote API or greater on your
- * container instance. To check the Docker Remote API version on your container instance, log into your
- * container instance and run the following command: sudo docker version | grep "Server API version"
- *
A list of volumes associated with the job.
+ */ + volumes?: Volume[]; + + /** + *The environment variables to pass to a container.
*The Amazon ECS container agent running on a container instance must register the logging drivers available on
- * that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed
- * on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the
- * Amazon Elastic Container Service Developer Guide.
Environment variables must not start with AWS_BATCH
; this naming
+ * convention is reserved for variables that are set by the AWS Batch service.
The mount points for data volumes in your container.
@@ -1876,19 +1909,20 @@ export interface ContainerDetail { mountPoints?: MountPoint[]; /** - *A list of volumes associated with the job.
+ *When this parameter is true, the container is given read-only access to its root file system.
*/ - volumes?: Volume[]; + readonlyRootFilesystem?: boolean; /** - *Linux-specific modifications that are applied to the container, such as details for device mappings.
+ *A list of ulimit
values to set in the container.
The Amazon Resource Name (ARN) of the container instance on which the container is running.
+ *When this parameter is true, the container is given elevated privileges on the host container instance (similar
+ * to the root
user).
The user name to use inside the container.
@@ -1896,19 +1930,9 @@ export interface ContainerDetail { user?: string; /** - *The network interfaces associated with the job.
- */ - networkInterfaces?: NetworkInterface[]; - - /** - *The number of MiB of memory reserved for the job. This is a required parameter.
- */ - memory?: number; - - /** - *The image used to start the container.
+ *The exit code to return upon completion.
*/ - image?: string; + exitCode?: number; /** *A short (255 max characters) human-readable string to provide additional details about a running or stopped @@ -1917,14 +1941,9 @@ export interface ContainerDetail { reason?: string; /** - *
The exit code to return upon completion.
- */ - exitCode?: number; - - /** - *The Amazon Resource Name (ARN) associated with the job upon execution.
+ *The Amazon Resource Name (ARN) of the container instance on which the container is running.
*/ - jobRoleArn?: string; + containerInstanceArn?: string; /** *The Amazon Resource Name (ARN) of the Amazon ECS task that is associated with the container job. Each container attempt receives a @@ -1932,37 +1951,66 @@ export interface ContainerDetail { */ taskArn?: string; + /** + *
The name of the CloudWatch Logs log stream associated with the container. The log group for AWS Batch jobs is
+ * /aws/batch/job
. Each container attempt receives a log stream name when they reach the
+ * RUNNING
status.
The instance type of the underlying host infrastructure of a multi-node parallel job.
*/ instanceType?: string; /** - *The name of the CloudWatch Logs log stream associated with the container. The log group for AWS Batch jobs is
- * /aws/batch/job
. Each container attempt receives a log stream name when they reach the
- * RUNNING
status.
The network interfaces associated with the job.
*/ - logStreamName?: string; + networkInterfaces?: NetworkInterface[]; /** - *The command that is passed to the container.
+ *The type and amount of a resource to assign to a container. Currently, the only supported resource is
+ * GPU
.
The secrets to pass to the container. For more information, see Specifying Sensitive Data in the - * Amazon Elastic Container Service Developer Guide.
+ *Linux-specific modifications that are applied to the container, such as details for device mappings.
*/ - secrets?: Secret[]; + linuxParameters?: LinuxParameters; /** - *The environment variables to pass to a container.
+ *The log configuration specification for the container.
+ *This parameter maps to LogConfig
in the Create a container section of the
+ * Docker Remote API and the --log-driver
option to docker run.
+ * By default, containers use the same logging driver that the Docker daemon uses. However the container may use a
+ * different logging driver than the Docker daemon by specifying a log driver with this parameter in the container
+ * definition. To use a different logging driver for a container, the log system must be configured properly on the
+ * container instance (or on a different log server for remote logging options). For more information on the options for
+ * different supported log drivers, see Configure
+ * logging drivers in the Docker documentation.
Environment variables must not start with AWS_BATCH
; this naming
- * convention is reserved for variables that are set by the AWS Batch service.
AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS + * container agent.
+ *This parameter requires version 1.18 of the Docker Remote API or greater on your
+ * container instance. To check the Docker Remote API version on your container instance, log into your
+ * container instance and run the following command: sudo docker version | grep "Server API version"
+ *
The Amazon ECS container agent running on a container instance must register the logging drivers available on that
+ * instance with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that
+ * instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the
+ * Amazon Elastic Container Service Developer Guide.
The secrets to pass to the container. For more information, see Specifying Sensitive Data in the + * Amazon Elastic Container Service Developer Guide.
+ */ + secrets?: Secret[]; } export namespace ContainerDetail { @@ -1997,15 +2045,15 @@ export namespace JobDependency { */ export interface NodeDetails { /** - *Specifies whether the current node is the main node for a multi-node parallel job.
+ *The node index for the node. Node index numbering begins at zero. This index is also available on the node with
+ * the AWS_BATCH_JOB_NODE_INDEX
environment variable.
The node index for the node. Node index numbering begins at zero. This index is also available on the node
- * with the AWS_BATCH_JOB_NODE_INDEX
environment variable.
Specifies whether the current node is the main node for a multi-node parallel job.
*/ - nodeIndex?: number; + isMainNode?: boolean; } export namespace NodeDetails { @@ -2029,21 +2077,19 @@ export enum JobStatus { */ export interface JobDetail { /** - *An object representing the node properties of a multi-node parallel job.
+ *The Amazon Resource Name (ARN) of the job.
*/ - nodeProperties?: NodeProperties; + jobArn?: string; /** - *The Unix timestamp (in milliseconds) for when the job was started (when the job transitioned from the
- * STARTING
state to the RUNNING
state). This parameter is not provided for
- * child jobs of array jobs or multi-node parallel jobs.
The name of the job.
*/ - startedAt: number | undefined; + jobName: string | undefined; /** - *The Amazon Resource Name (ARN) of the job.
+ *The ID for the job.
*/ - jobArn?: string; + jobId: string | undefined; /** *The Amazon Resource Name (ARN) of the job queue with which the job is associated.
@@ -2051,93 +2097,96 @@ export interface JobDetail { jobQueue: string | undefined; /** - *An object representing the details of the container that is associated with the job.
+ *The current status for the job.
+ *If your jobs do not progress to STARTING
, see Jobs Stuck in RUNNABLE Status in
+ * the troubleshooting section of the AWS Batch User Guide.
The job definition that is used by this job.
+ *A list of job attempts associated with this job.
*/ - jobDefinition: string | undefined; + attempts?: AttemptDetail[]; /** - *The ID for the job.
+ *A short, human-readable string to provide additional details about the current status of the job.
*/ - jobId: string | undefined; + statusReason?: string; /** - *Additional parameters passed to the job that replace parameter substitution placeholders or override any - * corresponding parameter defaults from the job definition.
+ *The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs,
+ * this is when the job entered the SUBMITTED
state (at the time SubmitJob was called).
+ * For array child jobs, this is when the child job was spawned by its parent and entered the PENDING
+ * state.
An object representing the details of a node that is associated with a multi-node parallel job.
+ *The retry strategy to use for this job if an attempt fails.
*/ - nodeDetails?: NodeDetails; + retryStrategy?: RetryStrategy; /** - *A list of job IDs on which this job depends.
+ *The Unix timestamp (in milliseconds) for when the job was started (when the job transitioned from the
+ * STARTING
state to the RUNNING
+ * state). This parameter is not provided for
+ * child jobs of array jobs or multi-node parallel jobs.
The retry strategy to use for this job if an attempt fails.
+ *The Unix timestamp (in milliseconds) for when the job was stopped (when the job transitioned from the
+ * RUNNING
state to a terminal state, such as SUCCEEDED
or FAILED
).
The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs,
- * this is when the job entered the SUBMITTED
state (at the time SubmitJob
- * was called). For array child jobs, this is when the child job was spawned by its parent and entered the
- * PENDING
state.
A list of job IDs on which this job depends.
*/ - createdAt?: number; + dependsOn?: JobDependency[]; /** - *The current status for the job.
- *If your jobs do not progress to STARTING
, see Jobs Stuck in RUNNABLE Status
- * in the troubleshooting section of the AWS Batch User Guide.
The job definition that is used by this job.
*/ - status: JobStatus | string | undefined; + jobDefinition: string | undefined; /** - *A list of job attempts associated with this job.
+ *Additional parameters passed to the job that replace parameter substitution placeholders or override any + * corresponding parameter defaults from the job definition.
*/ - attempts?: AttemptDetail[]; + parameters?: { [key: string]: string }; /** - *The tags applied to the job.
+ *An object representing the details of the container that is associated with the job.
*/ - tags?: { [key: string]: string }; + container?: ContainerDetail; /** - *The name of the job.
+ *An object representing the details of a node that is associated with a multi-node parallel job.
*/ - jobName: string | undefined; + nodeDetails?: NodeDetails; /** - *A short, human-readable string to provide additional details about the current status of the job.
+ *An object representing the node properties of a multi-node parallel job.
*/ - statusReason?: string; + nodeProperties?: NodeProperties; /** - *The timeout configuration for the job.
+ *The array properties of the job, if it is an array job.
*/ - timeout?: JobTimeout; + arrayProperties?: ArrayPropertiesDetail; /** - *The Unix timestamp (in milliseconds) for when the job was stopped (when the job transitioned from the
- * RUNNING
state to a terminal state, such as SUCCEEDED
or FAILED
).
The timeout configuration for the job.
*/ - stoppedAt?: number; + timeout?: JobTimeout; /** - *The array properties of the job, if it is an array job.
+ *The tags applied to the job.
*/ - arrayProperties?: ArrayPropertiesDetail; + tags?: { [key: string]: string }; } export namespace JobDetail { @@ -2161,49 +2210,49 @@ export namespace DescribeJobsResponse { export interface ListJobsRequest { /** - *The nextToken
value returned from a previous paginated ListJobs
request where
- * maxResults
was used and the results exceeded the value of that parameter. Pagination continues from
- * the end of the previous results that returned the nextToken
value. This value is null
- * when there are no more results to return.
This token should be treated as an opaque identifier that is only used to - * retrieve the next items in a list and not for other programmatic purposes.
- *The name or full Amazon Resource Name (ARN) of the job queue with which to list jobs.
*/ - nextToken?: string; + jobQueue?: string; /** - *The job ID for an array job. Specifying an array job ID with this parameter lists all child jobs from within - * the specified array.
+ *The job ID for an array job. Specifying an array job ID with this parameter lists all child jobs from within the + * specified array.
*/ arrayJobId?: string; /** - *The job ID for a multi-node parallel job. Specifying a multi-node parallel job ID with this parameter lists - * all nodes that are associated with the specified job.
+ *The job ID for a multi-node parallel job. Specifying a multi-node parallel job ID with this parameter lists all + * nodes that are associated with the specified job.
*/ multiNodeJobId?: string; + /** + *The job status with which to filter jobs in the specified queue. If you do not specify a status, only
+ * RUNNING
jobs are returned.
The maximum number of results returned by ListJobs
in paginated output. When this parameter is
* used, ListJobs
only returns maxResults
results in a single page along with a
- * nextToken
response element. The remaining results of the initial request can be seen by sending
- * another ListJobs
request with the returned nextToken
value. This value can be between
+ * nextToken
response element. The remaining results of the initial request can be seen by sending another
+ * ListJobs
request with the returned nextToken
value. This value can be between
* 1 and 100. If this parameter is not used, then ListJobs
returns up to
* 100 results and a nextToken
value if applicable.
The name or full Amazon Resource Name (ARN) of the job queue with which to list jobs.
- */ - jobQueue?: string; - - /** - *The job status with which to filter jobs in the specified queue. If you do not specify a status, only
- * RUNNING
jobs are returned.
The nextToken
value returned from a previous paginated ListJobs
request where
+ * maxResults
was used and the results exceeded the value of that parameter. Pagination continues from the
+ * end of the previous results that returned the nextToken
value. This value is null
when
+ * there are no more results to return.
This token should be treated as an opaque identifier that is only used to + * retrieve the next items in a list and not for other programmatic purposes.
+ *A short (255 max characters) human-readable string to provide additional details about a running or stopped - * container.
+ *The exit code to return upon completion.
*/ - reason?: string; + exitCode?: number; /** - *The exit code to return upon completion.
+ *A short (255 max characters) human-readable string to provide additional details about a running or stopped + * container.
*/ - exitCode?: number; + reason?: string; } export namespace ContainerSummary { @@ -2238,12 +2287,6 @@ export namespace ContainerSummary { *An object representing the properties of a node that is associated with a multi-node parallel job.
*/ export interface NodePropertiesSummary { - /** - *The node index for the node. Node index numbering begins at zero. This index is also available on the node
- * with the AWS_BATCH_JOB_NODE_INDEX
environment variable.
Specifies whether the current node is the main node for a multi-node parallel job.
*/ @@ -2253,6 +2296,12 @@ export interface NodePropertiesSummary { *The number of nodes associated with a multi-node parallel job.
*/ numNodes?: number; + + /** + *The node index for the node. Node index numbering begins at zero. This index is also available on the node with
+ * the AWS_BATCH_JOB_NODE_INDEX
environment variable.
The node properties for a single node in a job summary list.
+ *The Amazon Resource Name (ARN) of the job.
*/ - nodeProperties?: NodePropertiesSummary; + jobArn?: string; /** - *The Unix timestamp for when the job was started (when the job transitioned from the STARTING
- * state to the RUNNING
state).
The ID of the job.
*/ - startedAt?: number; + jobId: string | undefined; /** - *The ID of the job.
+ *The name of the job.
*/ - jobId: string | undefined; + jobName: string | undefined; /** - *An object representing the details of the container that is associated with the job.
+ *The Unix timestamp for when the job was created. For non-array jobs and parent array jobs, this is when the job
+ * entered the SUBMITTED
state (at the time SubmitJob was called). For array child jobs,
+ * this is when the child job was spawned by its parent and entered the PENDING
state.
The Unix timestamp for when the job was stopped (when the job transitioned from the RUNNING
state
- * to a terminal state, such as SUCCEEDED
or FAILED
).
The current status for the job.
*/ - stoppedAt?: number; + status?: JobStatus | string; /** - *The array properties of the job, if it is an array job.
+ *A short, human-readable string to provide additional details about the current status of the job.
*/ - arrayProperties?: ArrayPropertiesSummary; + statusReason?: string; /** - *The Unix timestamp for when the job was created. For non-array jobs and parent array jobs, this is when the
- * job entered the SUBMITTED
state (at the time SubmitJob was called). For array child
- * jobs, this is when the child job was spawned by its parent and entered the PENDING
state.
The Unix timestamp for when the job was started (when the job transitioned from the STARTING
state
+ * to the RUNNING
state).
A short, human-readable string to provide additional details about the current status of the job.
+ *The Unix timestamp for when the job was stopped (when the job transitioned from the RUNNING
state
+ * to a terminal state, such as SUCCEEDED
or FAILED
).
The current status for the job.
+ *An object representing the details of the container that is associated with the job.
*/ - status?: JobStatus | string; + container?: ContainerSummary; /** - *The name of the job.
+ *The array properties of the job, if it is an array job.
*/ - jobName: string | undefined; + arrayProperties?: ArrayPropertiesSummary; /** - *The Amazon Resource Name (ARN) of the job.
+ *The node properties for a single node in a job summary list.
*/ - jobArn?: string; + nodeProperties?: NodePropertiesSummary; } export namespace JobSummary { @@ -2339,8 +2388,8 @@ export interface ListJobsResponse { /** *The nextToken
value to include in a future ListJobs
request. When the results of a
- * ListJobs
request exceed maxResults
, this value can be used to retrieve the next page
- * of results. This value is null
when there are no more results to return.
ListJobs
request exceed maxResults
, this value can be used to retrieve the next page of
+ * results. This value is null
when there are no more results to return.
*/
nextToken?: string;
}
@@ -2385,32 +2434,29 @@ export enum JobDefinitionType {
export interface RegisterJobDefinitionRequest {
/**
- * The timeout configuration for jobs that are submitted with this job definition, after which AWS Batch terminates - * your jobs if they have not finished. If a job is terminated due to a timeout, it is not retried. The minimum value - * for the timeout is 60 seconds. Any timeout configuration that is specified during a SubmitJob - * operation overrides the timeout configuration defined here. For more information, see Job Timeouts in the - * Amazon Elastic Container Service Developer Guide.
+ *The name of the job definition to register. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and + * underscores are allowed.
*/ - timeout?: JobTimeout; + jobDefinitionName: string | undefined; /** - *Default parameter substitution placeholders to set in the job definition. Parameters are specified as a
- * key-value pair mapping. Parameters in a SubmitJob
request override any corresponding parameter
- * defaults from the job definition.
The type of job definition.
*/ - parameters?: { [key: string]: string }; + type: JobDefinitionType | string | undefined; /** - *The type of job definition.
+ *Default parameter substitution placeholders to set in the job definition. Parameters are specified as a
+ * key-value pair mapping. Parameters in a SubmitJob
request override any corresponding parameter defaults
+ * from the job definition.
The tags that you apply to the job definition to help you categorize and organize your resources. Each tag - * consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General - * Reference.
+ *An object with various properties specific to single-node container-based jobs. If the job definition's
+ * type
parameter is container
, then you must specify either containerProperties
+ * or nodeProperties
.
An object with various properties specific to multi-node parallel jobs. If you specify node properties for a @@ -2421,13 +2467,6 @@ export interface RegisterJobDefinitionRequest { */ nodeProperties?: NodeProperties; - /** - *
An object with various properties specific to single-node container-based jobs. If the job definition's
- * type
parameter is container
, then you must specify either
- * containerProperties
or nodeProperties
.
The retry strategy to use for failed jobs that are submitted with this job definition. Any retry strategy that * is specified during a SubmitJob operation overrides the retry strategy defined here. If a job is @@ -2436,10 +2475,20 @@ export interface RegisterJobDefinitionRequest { retryStrategy?: RetryStrategy; /** - *
The name of the job definition to register. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and - * underscores are allowed.
+ *The timeout configuration for jobs that are submitted with this job definition, after which AWS Batch terminates + * your jobs if they have not finished. If a job is terminated due to a timeout, it is not retried. The minimum value + * for the timeout is 60 seconds. Any timeout configuration that is specified during a SubmitJob + * operation overrides the timeout configuration defined here. For more information, see Job Timeouts in the + * Amazon Elastic Container Service Developer Guide.
*/ - jobDefinitionName: string | undefined; + timeout?: JobTimeout; + + /** + *The tags that you apply to the job definition to help you categorize and organize your resources. Each tag + * consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General + * Reference.
+ */ + tags?: { [key: string]: string }; } export namespace RegisterJobDefinitionRequest { @@ -2476,27 +2525,22 @@ export namespace RegisterJobDefinitionResponse { */ export interface ContainerOverrides { /** - *The number of MiB of memory reserved for the job. This value overrides the value set in the job + *
The number of vCPUs to reserve for the container. This value overrides the value set in the job * definition.
*/ - memory?: number; + vcpus?: number; /** - *The type and amount of a resource to assign to a container. This value overrides the value set in the job
- * definition. Currently, the only supported resource is GPU
.
The number of MiB of memory reserved for the job. This value overrides the value set in the job + * definition.
*/ - resourceRequirements?: ResourceRequirement[]; + memory?: number; /** - *The environment variables to send to the container. You can add new environment variables, which are added to - * the container at launch, or you can override the existing environment variables from the Docker image or the job + *
The command to send to the container that overrides the default command from the Docker image or the job * definition.
- *Environment variables must not start with AWS_BATCH
; this naming
- * convention is reserved for variables that are set by the AWS Batch service.
The instance type to use for a multi-node parallel job. This parameter is not valid for single-node container @@ -2505,16 +2549,21 @@ export interface ContainerOverrides { instanceType?: string; /** - *
The command to send to the container that overrides the default command from the Docker image or the job + *
The environment variables to send to the container. You can add new environment variables, which are added to + * the container at launch, or you can override the existing environment variables from the Docker image or the job * definition.
+ *Environment variables must not start with AWS_BATCH
; this naming
+ * convention is reserved for variables that are set by the AWS Batch service.
The number of vCPUs to reserve for the container. This value overrides the value set in the job - * definition.
+ *The type and amount of a resource to assign to a container. This value overrides the value set in the job
+ * definition. Currently, the only supported resource is GPU
.
The overrides that should be sent to a node range.
+ *The range of nodes, using node index values, with which to override. A range of 0:3
indicates nodes
+ * with index values of 0
through 3
. If the starting range value is omitted (:n
),
+ * then 0
is used to start the range. If the ending range value is omitted (n:
), then the
+ * highest possible node index is used to end the range.
The range of nodes, using node index values, with which to override. A range of 0:3
indicates
- * nodes with index values of 0
through 3
. If the starting range value is omitted
- * (:n
), then 0
is used to start the range. If the ending range value is omitted
- * (n:
), then the highest possible node index is used to end the range.
The overrides that should be sent to a node range.
*/ - targetNodes: string | undefined; + containerOverrides?: ContainerOverrides; } export namespace NodePropertyOverride { @@ -2553,11 +2602,6 @@ export namespace NodePropertyOverride { * operation. */ export interface NodeOverrides { - /** - *The node property overrides for the job.
- */ - nodePropertyOverrides?: NodePropertyOverride[]; - /** *The number of nodes to use with a multi-node parallel job. This value overrides the number of nodes that are * specified in the job definition. To use this override:
@@ -2567,16 +2611,21 @@ export interface NodeOverrides { *:
or n:
).
* The lower boundary of the node range specified in the job definition must be fewer than the number of - * nodes specified in the override.
+ *The lower boundary of the node range specified in the job definition must be fewer than the number of nodes + * specified in the override.
*The main node index specified in the job definition must be fewer than the number of nodes specified in - * the override.
+ *The main node index specified in the job definition must be fewer than the number of nodes specified in the + * override.
*The node property overrides for the job.
+ */ + nodePropertyOverrides?: NodePropertyOverride[]; } export namespace NodeOverrides { @@ -2586,6 +2635,12 @@ export namespace NodeOverrides { } export interface SubmitJobRequest { + /** + *The name of the job. The first character must be alphanumeric, and up to 128 letters (uppercase and lowercase), + * numbers, hyphens, and underscores are allowed.
+ */ + jobName: string | undefined; + /** *The job queue into which the job is submitted. You can specify either the name or the Amazon Resource Name (ARN) of the * queue.
@@ -2593,35 +2648,43 @@ export interface SubmitJobRequest { jobQueue: string | undefined; /** - *The name of the job. The first character must be alphanumeric, and up to 128 letters (uppercase and - * lowercase), numbers, hyphens, and underscores are allowed.
+ *The array properties for the submitted job, such as the size of the array. The array size can be between 2 and + * 10,000. If you specify array properties for a job, it becomes an array job. For more information, see Array Jobs in the + * AWS Batch User Guide.
*/ - jobName: string | undefined; + arrayProperties?: ArrayProperties; + + /** + *A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a
+ * SEQUENTIAL
type dependency without specifying a job ID for array jobs so that each child array job
+ * completes sequentially, starting at index 0. You can also specify an N_TO_N
type dependency with a job
+ * ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each
+ * dependency to complete before it can begin.
The job definition used by this job. This value can be one of name
, name:revision
,
- * or the Amazon Resource Name (ARN) for the job definition. If
- * name
is specified without a revision then the latest active revision is
- * used.
The job definition used by this job. This value can be one of name
, name:revision
, or
+ * the Amazon Resource Name (ARN) for the job definition. If
+ * name
is specified without a revision then the latest active revision is used.
The timeout configuration for this SubmitJob operation. You can specify a timeout duration - * after which AWS Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it is - * not retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout - * configuration specified in the job definition. For array jobs, child jobs have the same timeout configuration as - * the parent job. For more information, see Job Timeouts in the - * Amazon Elastic Container Service Developer Guide.
+ *Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job
+ * definition. Parameters are specified as a key and value pair mapping. Parameters in a SubmitJob
request
+ * override any corresponding parameter defaults from the job definition.
Additional parameters passed to the job that replace parameter substitution placeholders that are set in the
- * job definition. Parameters are specified as a key and value pair mapping. Parameters in a SubmitJob
- * request override any corresponding parameter defaults from the job definition.
A list of container overrides in JSON format that specify the name of a container in the specified job
+ * definition and the overrides it should receive. You can override the default command for a container (that is
+ * specified in the job definition or the Docker image) with a command
override. You can also override
+ * existing environment variables (that are specified in the job definition or Docker image) on a container or add new
+ * environment variables to it with an environment
override.
A list of node overrides in JSON format that specify the node range to target and the container overrides for @@ -2630,42 +2693,27 @@ export interface SubmitJobRequest { nodeOverrides?: NodeOverrides; /** - *
The retry strategy to use for failed jobs from this SubmitJob operation. When a retry - * strategy is specified here, it overrides the retry strategy defined in the job definition.
+ *The retry strategy to use for failed jobs from this SubmitJob operation. When a retry strategy + * is specified here, it overrides the retry strategy defined in the job definition.
*/ retryStrategy?: RetryStrategy; /** - *The array properties for the submitted job, such as the size of the array. The array size can be between 2 and - * 10,000. If you specify array properties for a job, it becomes an array job. For more information, see Array Jobs in the - * AWS Batch User Guide.
+ *The timeout configuration for this SubmitJob operation. You can specify a timeout duration + * after which AWS Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it is + * not retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration + * specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job. + * For more information, see Job + * Timeouts in the Amazon Elastic Container Service Developer Guide.
*/ - arrayProperties?: ArrayProperties; + timeout?: JobTimeout; /** - *The tags that you apply to the job request to help you categorize and organize your resources. Each tag - * consists of a key and an optional value. For more information, see Tagging AWS Resources in AWS General - * Reference.
+ *The tags that you apply to the job request to help you categorize and organize your resources. Each tag consists + * of a key and an optional value. For more information, see Tagging AWS Resources in AWS General + * Reference.
*/ tags?: { [key: string]: string }; - - /** - *A list of container overrides in JSON format that specify the name of a container in the specified job
- * definition and the overrides it should receive. You can override the default command for a container (that is
- * specified in the job definition or the Docker image) with a command
override. You can also override
- * existing environment variables (that are specified in the job definition or Docker image) on a container or add
- * new environment variables to it with an environment
override.
A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a
- * SEQUENTIAL
type dependency without specifying a job ID for array jobs so that each child array job
- * completes sequentially, starting at index 0. You can also specify an N_TO_N
type dependency with a
- * job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of
- * each dependency to complete before it can begin.
The Amazon Resource Name (ARN) for the job.
+ */ + jobArn?: string; + /** *The name of the job.
*/ @@ -2684,11 +2737,6 @@ export interface SubmitJobResponse { *The unique identifier for the job.
*/ jobId: string | undefined; - - /** - *The Amazon Resource Name (ARN) for the job.
- */ - jobArn?: string; } export namespace SubmitJobResponse { @@ -2698,18 +2746,18 @@ export namespace SubmitJobResponse { } export interface TagResourceRequest { - /** - *The tags that you apply to the resource to help you categorize and organize your resources. Each tag consists - * of a key and an optional value. For more information, see Tagging AWS Resources in AWS General - * Reference.
- */ - tags: { [key: string]: string } | undefined; - /** *The Amazon Resource Name (ARN) of the resource to which to add tags. AWS Batch resources that support tags are compute environments, jobs, job definitions, and job * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
*/ resourceArn: string | undefined; + + /** + *The tags that you apply to the resource to help you categorize and organize your resources. Each tag consists of + * a key and an optional value. For more information, see Tagging AWS Resources in AWS General + * Reference.
+ */ + tags: { [key: string]: string } | undefined; } export namespace TagResourceRequest { @@ -2809,18 +2857,9 @@ export namespace ComputeResourceUpdate { export interface UpdateComputeEnvironmentRequest { /** - *The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your - * behalf.
- *If your specified role has a path other than /
, then you must either specify the full role ARN
- * (this is recommended) or prefix the role name with the path.
Depending on how you created your AWS Batch service role, its ARN may contain the service-role
- * path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the
- * service-role
path prefix. Because of this, we recommend that you specify the full ARN of your
- * service role when you create compute environments.
The name or full Amazon Resource Name (ARN) of the compute environment to update.
*/ - serviceRole?: string; + computeEnvironment: string | undefined; /** *The state of the compute environment. Compute environments in the ENABLED
state can accept jobs
@@ -2835,9 +2874,18 @@ export interface UpdateComputeEnvironmentRequest {
computeResources?: ComputeResourceUpdate;
/**
- *
The name or full Amazon Resource Name (ARN) of the compute environment to update.
+ *The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your + * behalf.
+ *If your specified role has a path other than /
, then you must either specify the full role ARN
+ * (this is recommended) or prefix the role name with the path.
Depending on how you created your AWS Batch service role, its ARN may contain the service-role
path
+ * prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN does not use the
+ * service-role
path prefix. Because of this, we recommend that you specify the full ARN of your service
+ * role when you create compute environments.
The Amazon Resource Name (ARN) of the compute environment.
+ *The name of the compute environment.
*/ - computeEnvironmentArn?: string; + computeEnvironmentName?: string; /** - *The name of the compute environment.
+ *The Amazon Resource Name (ARN) of the compute environment.
*/ - computeEnvironmentName?: string; + computeEnvironmentArn?: string; } export namespace UpdateComputeEnvironmentResponse { @@ -2865,6 +2913,11 @@ export namespace UpdateComputeEnvironmentResponse { } export interface UpdateJobQueueRequest { + /** + *The name or the Amazon Resource Name (ARN) of the job queue.
+ */ + jobQueue: string | undefined; + /** *Describes the queue's ability to accept new jobs. If the job queue state is ENABLED
, it is able to
* accept jobs. If the job queue state is DISABLED
, new jobs cannot be added to the queue, but jobs already
@@ -2872,25 +2925,20 @@ export interface UpdateJobQueueRequest {
*/
state?: JQState | string;
- /**
- *
Details the set of compute environments mapped to a job queue and their order relative to each other. This is - * one of the parameters used by the job scheduler to determine which compute environment should execute a given - * job.
- */ - computeEnvironmentOrder?: ComputeEnvironmentOrder[]; - /** *The priority of the job queue. Job queues with a higher priority (or a higher integer value for the
- * priority
parameter) are evaluated first when associated with the same compute environment. Priority
- * is determined in descending order, for example, a job queue with a priority value of 10
is given
- * scheduling preference over a job queue with a priority value of 1
.
priority
parameter) are evaluated first when associated with the same compute environment. Priority is
+ * determined in descending order, for example, a job queue with a priority value of 10
is given scheduling
+ * preference over a job queue with a priority value of 1
.
*/
priority?: number;
/**
- * The name or the Amazon Resource Name (ARN) of the job queue.
+ *Details the set of compute environments mapped to a job queue and their order relative to each other. This is + * one of the parameters used by the job scheduler to determine which compute environment should execute a given + * job.
*/ - jobQueue: string | undefined; + computeEnvironmentOrder?: ComputeEnvironmentOrder[]; } export namespace UpdateJobQueueRequest { diff --git a/clients/client-batch/protocols/Aws_restJson1.ts b/clients/client-batch/protocols/Aws_restJson1.ts index 70199607c59e..989b8ed18302 100644 --- a/clients/client-batch/protocols/Aws_restJson1.ts +++ b/clients/client-batch/protocols/Aws_restJson1.ts @@ -58,6 +58,7 @@ import { ContainerSummary, Device, DeviceCgroupPermission, + Ec2Configuration, EvaluateOnExit, Host, JobDefinition, @@ -1940,6 +1941,9 @@ const serializeAws_restJson1ComputeResource = (input: ComputeResource, context: ...(input.allocationStrategy !== undefined && { allocationStrategy: input.allocationStrategy }), ...(input.bidPercentage !== undefined && { bidPercentage: input.bidPercentage }), ...(input.desiredvCpus !== undefined && { desiredvCpus: input.desiredvCpus }), + ...(input.ec2Configuration !== undefined && { + ec2Configuration: serializeAws_restJson1Ec2ConfigurationList(input.ec2Configuration, context), + }), ...(input.ec2KeyPair !== undefined && { ec2KeyPair: input.ec2KeyPair }), ...(input.imageId !== undefined && { imageId: input.imageId }), ...(input.instanceRole !== undefined && { instanceRole: input.instanceRole }), @@ -2039,6 +2043,17 @@ const serializeAws_restJson1DevicesList = (input: Device[], context: __SerdeCont return input.map((entry) => serializeAws_restJson1Device(entry, context)); }; +const serializeAws_restJson1Ec2Configuration = (input: Ec2Configuration, context: __SerdeContext): any => { + return { + ...(input.imageIdOverride !== undefined && { imageIdOverride: input.imageIdOverride }), + ...(input.imageType !== undefined && { imageType: input.imageType }), + }; +}; + +const serializeAws_restJson1Ec2ConfigurationList = (input: Ec2Configuration[], context: __SerdeContext): any => { + return input.map((entry) => serializeAws_restJson1Ec2Configuration(entry, context)); +}; + const serializeAws_restJson1EnvironmentVariables = (input: KeyValuePair[], context: __SerdeContext): any => { return input.map((entry) => serializeAws_restJson1KeyValuePair(entry, context)); }; @@ -2430,6 +2445,10 @@ const deserializeAws_restJson1ComputeResource = (output: any, context: __SerdeCo bidPercentage: output.bidPercentage !== undefined && output.bidPercentage !== null ? output.bidPercentage : undefined, desiredvCpus: output.desiredvCpus !== undefined && output.desiredvCpus !== null ? output.desiredvCpus : undefined, + ec2Configuration: + output.ec2Configuration !== undefined && output.ec2Configuration !== null + ? deserializeAws_restJson1Ec2ConfigurationList(output.ec2Configuration, context) + : undefined, ec2KeyPair: output.ec2KeyPair !== undefined && output.ec2KeyPair !== null ? output.ec2KeyPair : undefined, imageId: output.imageId !== undefined && output.imageId !== null ? output.imageId : undefined, instanceRole: output.instanceRole !== undefined && output.instanceRole !== null ? output.instanceRole : undefined, @@ -2614,6 +2633,18 @@ const deserializeAws_restJson1DevicesList = (output: any, context: __SerdeContex return (output || []).map((entry: any) => deserializeAws_restJson1Device(entry, context)); }; +const deserializeAws_restJson1Ec2Configuration = (output: any, context: __SerdeContext): Ec2Configuration => { + return { + imageIdOverride: + output.imageIdOverride !== undefined && output.imageIdOverride !== null ? output.imageIdOverride : undefined, + imageType: output.imageType !== undefined && output.imageType !== null ? 
output.imageType : undefined, + } as any; +}; + +const deserializeAws_restJson1Ec2ConfigurationList = (output: any, context: __SerdeContext): Ec2Configuration[] => { + return (output || []).map((entry: any) => deserializeAws_restJson1Ec2Configuration(entry, context)); +}; + const deserializeAws_restJson1EnvironmentVariables = (output: any, context: __SerdeContext): KeyValuePair[] => { return (output || []).map((entry: any) => deserializeAws_restJson1KeyValuePair(entry, context)); }; diff --git a/clients/client-cloudformation/models/models_0.ts b/clients/client-cloudformation/models/models_0.ts index b3bff1a7f6f5..4e7392300ee0 100644 --- a/clients/client-cloudformation/models/models_0.ts +++ b/clients/client-cloudformation/models/models_0.ts @@ -18,12 +18,6 @@ export type AccountGateStatus = "FAILED" | "SKIPPED" | "SUCCEEDED"; * target account gate. */ export interface AccountGateResult { - /** - *The reason for the account gate status assigned to this account and Region for the - * stack set operation.
- */ - StatusReason?: string; - /** *The status of the account gate function.
*The reason for the account gate status assigned to this account and Region for the + * stack set operation.
+ */ + StatusReason?: string; } export namespace AccountGateResult { @@ -135,14 +135,14 @@ export namespace AlreadyExistsException { */ export interface AutoDeployment { /** - *If set to true
, stack resources are retained when an account is removed from a target organization or OU. If set to false
, stack resources are deleted. Specify only if Enabled
is set to True
.
If set to true
, StackSets automatically deploys additional stack instances to AWS Organizations accounts that are added to a target organization or organizational unit (OU) in the specified Regions. If an account is removed from a target organization or OU, StackSets deletes stack instances from the account in the specified Regions.
If set to true
, StackSets automatically deploys additional stack instances to AWS Organizations accounts that are added to a target organization or organizational unit (OU) in the specified Regions. If an account is removed from a target organization or OU, StackSets deletes stack instances from the account in the specified Regions.
If set to true
, stack resources are retained when an account is removed from a target organization or OU. If set to false
, stack resources are deleted. Specify only if Enabled
is set to True
.
The input for the CancelUpdateStack action.
*/ export interface CancelUpdateStackInput { + /** + *The name or the unique stack ID that is associated with the stack.
+ */ + StackName: string | undefined; + /** *A unique identifier for this CancelUpdateStack
request. Specify this
* token if you plan to retry requests so that AWS CloudFormation knows that you're not
@@ -163,11 +168,6 @@ export interface CancelUpdateStackInput {
* received them.
The name or the unique stack ID that is associated with the stack.
- */ - StackName: string | undefined; } export namespace CancelUpdateStackInput { @@ -253,15 +253,6 @@ export enum RequiresRecreation { * property, and whether the resource will be recreated. */ export interface ResourceTargetDefinition { - /** - *If the Attribute
value is Properties
, indicates whether a
- * change to this property causes the resource to be recreated. The value can be
- * Never
, Always
, or Conditionally
. To determine the
- * conditions for a Conditionally
recreation, see the update behavior for that
- * property in the AWS CloudFormation User Guide.
Indicates which resource attribute is triggering this update, such as a change in the
* resource attribute's Metadata
, Properties
, or
@@ -274,6 +265,15 @@ export interface ResourceTargetDefinition {
* property. For all other attributes, the value is null.
If the Attribute
value is Properties
, indicates whether a
+ * change to this property causes the resource to be recreated. The value can be
+ * Never
, Always
, or Conditionally
. To determine the
+ * conditions for a Conditionally
recreation, see the update behavior for that
+ * property in the AWS CloudFormation User Guide.
A ResourceTargetDefinition
structure that describes the field that AWS
+ * CloudFormation will change and whether the resource will be recreated.
Indicates whether AWS CloudFormation can determine the target value, and whether the + * target value will change before you execute a change set.
+ *For Static
evaluations, AWS CloudFormation can determine that the target
+ * value will change, and its value. For example, if you directly modify the
+ * InstanceType
property of an EC2 instance, AWS CloudFormation knows that
+ * this property value will change, and its value, so this is a Static
+ * evaluation.
For Dynamic
evaluations, cannot determine the target value because it
+ * depends on the result of an intrinsic function, such as a Ref
or
+ * Fn::GetAtt
intrinsic function, when the stack is updated. For example, if
+ * your template includes a reference to a resource that is conditionally recreated, the value
+ * of the reference (the physical ID of the resource) might change, depending on if the
+ * resource is recreated. If the resource is recreated, it will have a new physical ID, so all
+ * references to that resource will also be updated.
The group to which the CausingEntity
value belongs. There are five
* entity groups:
Indicates whether AWS CloudFormation can determine the target value, and whether the - * target value will change before you execute a change set.
- *For Static
evaluations, AWS CloudFormation can determine that the target
- * value will change, and its value. For example, if you directly modify the
- * InstanceType
property of an EC2 instance, AWS CloudFormation knows that
- * this property value will change, and its value, so this is a Static
- * evaluation.
For Dynamic
evaluations, cannot determine the target value because it
- * depends on the result of an intrinsic function, such as a Ref
or
- * Fn::GetAtt
intrinsic function, when the stack is updated. For example, if
- * your template includes a reference to a resource that is conditionally recreated, the value
- * of the reference (the physical ID of the resource) might change, depending on if the
- * resource is recreated. If the resource is recreated, it will have a new physical ID, so all
- * references to that resource will also be updated.
A ResourceTargetDefinition
structure that describes the field that AWS
- * CloudFormation will change and whether the resource will be recreated.
The identity of the entity that triggered this change. This entity is a member of the
* group that is specified by the ChangeSource
field. For example, if you
@@ -370,6 +370,37 @@ export namespace ResourceChangeDetail {
});
}
+/**
+ *
Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
+ *For more information on modules, see Using modules to encapsulate and reuse resource configurations in the CloudFormation User Guide.
+ */ +export interface ModuleInfo { + /** + *A concantenated list of the the module type or types containing the resource. Module types are listed starting with the inner-most nested module, and separated by /
.
In the following example, the resource was created from a module of type AWS::First::Example::MODULE
, that is nested inside a parent module of type AWS::Second::Example::MODULE
.
+ * AWS::First::Example::MODULE/AWS::Second::Example::MODULE
+ *
A concantenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /
.
In the following example, the resource was created from a module, moduleA
, that is nested inside a parent module, moduleB
.
+ * moduleA/moduleB
+ *
For more information, see Referencing resources in a module in the CloudFormation User Guide.
+ */ + LogicalIdHierarchy?: string; +} + +export namespace ModuleInfo { + export const filterSensitiveLog = (obj: ModuleInfo): any => ({ + ...obj, + }); +} + export enum Replacement { Conditional = "Conditional", False = "False", @@ -381,6 +412,31 @@ export enum Replacement { * AWS CloudFormation will perform on it if you execute this change set. */ export interface ResourceChange { + /** + *The action that AWS CloudFormation takes on the resource, such as Add
+ * (adds a new resource), Modify
(changes a resource), Remove
+ * (deletes a resource), Import
(imports a resource), or Dynamic
+ * (exact action for the resource cannot be determined).
The resource's logical ID, which is defined in the stack's template.
+ */ + LogicalResourceId?: string; + + /** + *The resource's physical ID (resource name). Resources that you are adding don't have + * physical IDs because they haven't been created.
+ */ + PhysicalResourceId?: string; + + /** + *The type of AWS CloudFormation resource, such as
+ * AWS::S3::Bucket
.
For the Modify
action, indicates whether AWS CloudFormation will replace
* the resource by creating a new one and deleting the old one. This value depends on the
@@ -398,25 +454,6 @@ export interface ResourceChange {
*/
Replacement?: Replacement | string;
- /**
- *
The resource's physical ID (resource name). Resources that you are adding don't have - * physical IDs because they haven't been created.
- */ - PhysicalResourceId?: string; - - /** - *The resource's logical ID, which is defined in the stack's template.
- */ - LogicalResourceId?: string; - - /** - *The action that AWS CloudFormation takes on the resource, such as Add
- * (adds a new resource), Modify
(changes a resource), Remove
- * (deletes a resource), Import
(imports a resource), or Dynamic
- * (exact action for the resource cannot be determined).
For the Modify
action, indicates which resource attribute is triggering
* this update, such as a change in the resource attribute's Metadata
,
@@ -432,15 +469,14 @@ export interface ResourceChange {
Details?: ResourceChangeDetail[];
/**
- *
The type of AWS CloudFormation resource, such as
- * AWS::S3::Bucket
.
The change set ID of the nested change set.
*/ - ResourceType?: string; + ChangeSetId?: string; /** - *The change set ID of the nested change set.
+ *Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
*/ - ChangeSetId?: string; + ModuleInfo?: ModuleInfo; } export namespace ResourceChange { @@ -519,35 +555,39 @@ export enum ExecutionStatus { */ export interface ChangeSetSummary { /** - *Descriptive information about the change set.
+ *The ID of the stack with which the change set is associated.
*/ - Description?: string; + StackId?: string; /** - *The state of the change set, such as CREATE_IN_PROGRESS
,
- * CREATE_COMPLETE
, or FAILED
.
The name of the stack with which the change set is associated.
*/ - Status?: ChangeSetStatus | string; + StackName?: string; /** - *The name of the change set.
+ *The ID of the change set.
*/ - ChangeSetName?: string; + ChangeSetId?: string; /** - *The name of the stack with which the change set is associated.
+ *The name of the change set.
*/ - StackName?: string; + ChangeSetName?: string; /** - *The start time when the change set was created, in UTC.
+ *If the change set execution status is AVAILABLE
, you can execute the
+ * change set. If you can’t execute the change set, the status indicates why. For example, a
+ * change set might be in an UNAVAILABLE
state because AWS CloudFormation is
+ * still creating it or in an OBSOLETE
state because the stack was already
+ * updated.
The ID of the stack with which the change set is associated.
+ *The state of the change set, such as CREATE_IN_PROGRESS
,
+ * CREATE_COMPLETE
, or FAILED
.
A description of the change set's status. For example, if your change set is in the @@ -556,34 +596,30 @@ export interface ChangeSetSummary { StatusReason?: string; /** - *
The parent change set ID.
+ *The start time when the change set was created, in UTC.
*/ - ParentChangeSetId?: string; + CreationTime?: Date; /** - *If the change set execution status is AVAILABLE
, you can execute the
- * change set. If you can’t execute the change set, the status indicates why. For example, a
- * change set might be in an UNAVAILABLE
state because AWS CloudFormation is
- * still creating it or in an OBSOLETE
state because the stack was already
- * updated.
Descriptive information about the change set.
*/ - ExecutionStatus?: ExecutionStatus | string; + Description?: string; /** - *The ID of the change set.
+ *Specifies the current setting of IncludeNestedStacks
for the change
+ * set.
The root change set ID.
+ *The parent change set ID.
*/ - RootChangeSetId?: string; + ParentChangeSetId?: string; /** - *Specifies the current setting of IncludeNestedStacks
for the change
- * set.
The root change set ID.
*/ - IncludeNestedStacks?: boolean; + RootChangeSetId?: string; } export namespace ChangeSetSummary { @@ -603,13 +639,29 @@ export enum ChangeSetType { */ export interface ContinueUpdateRollbackInput { /** - *A unique identifier for this ContinueUpdateRollback
request. Specify
- * this token if you plan to retry requests so that AWS CloudFormation knows that you're not
- * attempting to continue the rollback to a stack with the same name. You might retry
- * ContinueUpdateRollback
requests to ensure that AWS CloudFormation
- * successfully received them.
The name or the unique ID of the stack that you want to continue rolling + * back.
+ *Don't specify the name of a nested stack (a stack that was created by using the
+ * AWS::CloudFormation::Stack
resource). Instead, use this operation on the
+ * parent stack (the stack that contains the AWS::CloudFormation::Stack
+ * resource).
The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role + * that AWS CloudFormation assumes to roll back the stack. AWS CloudFormation uses the role's + * credentials to make calls on your behalf. AWS CloudFormation always uses this role for all + * future operations on the stack. As long as users have permission to operate on the stack, + * AWS CloudFormation uses this role even if the users don't have permission to pass it. + * Ensure that the role grants least privilege.
+ *If you don't specify a value, AWS CloudFormation uses the role that was previously + * associated with the stack. If no role is available, AWS CloudFormation uses a temporary + * session that is generated from your user credentials.
+ */ + RoleARN?: string; /** *A list of the logical IDs of the resources that AWS CloudFormation skips during the @@ -645,29 +697,13 @@ export interface ContinueUpdateRollbackInput { ResourcesToSkip?: string[]; /** - *
The name or the unique ID of the stack that you want to continue rolling - * back.
- *Don't specify the name of a nested stack (a stack that was created by using the
- * AWS::CloudFormation::Stack
resource). Instead, use this operation on the
- * parent stack (the stack that contains the AWS::CloudFormation::Stack
- * resource).
The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role - * that AWS CloudFormation assumes to roll back the stack. AWS CloudFormation uses the role's - * credentials to make calls on your behalf. AWS CloudFormation always uses this role for all - * future operations on the stack. As long as users have permission to operate on the stack, - * AWS CloudFormation uses this role even if the users don't have permission to pass it. - * Ensure that the role grants least privilege.
- *If you don't specify a value, AWS CloudFormation uses the role that was previously - * associated with the stack. If no role is available, AWS CloudFormation uses a temporary - * session that is generated from your user credentials.
+ *A unique identifier for this ContinueUpdateRollback
request. Specify
+ * this token if you plan to retry requests so that AWS CloudFormation knows that you're not
+ * attempting to continue the rollback to a stack with the same name. You might retry
+ * ContinueUpdateRollback
requests to ensure that AWS CloudFormation
+ * successfully received them.
During a stack update, use the existing parameter value that the stack is using for a
- * given parameter key. If you specify true
, do not specify a parameter
- * value.
The key associated with the parameter. If you don't specify a key and value for a + * particular parameter, AWS CloudFormation uses the default value that is specified in your + * template.
*/ - UsePreviousValue?: boolean; + ParameterKey?: string; /** *The input value associated with the parameter.
*/ ParameterValue?: string; + /** + *During a stack update, use the existing parameter value that the stack is using for a
+ * given parameter key. If you specify true
, do not specify a parameter
+ * value.
Read-only. The value that corresponds to a Systems Manager parameter key. This field
* is returned only for
* SSM
parameter types in the template.
The key associated with the parameter. If you don't specify a key and value for a - * particular parameter, AWS CloudFormation uses the default value that is specified in your - * template.
- */ - ParameterKey?: string; } export namespace Parameter { @@ -728,13 +764,6 @@ export namespace Parameter { *Describes the target resource of an import operation.
*/ export interface ResourceToImport { - /** - *A key-value pair that identifies the target resource. The key is an identifier property
- * (for example, BucketName
for AWS::S3::Bucket
resources) and the
- * value is the actual property value (for example, MyS3Bucket
).
The type of resource to import into your stack, such as AWS::S3::Bucket
. For a list of supported resource types, see Resources that support import operations in the AWS CloudFormation User Guide.
The logical ID of the target resource as specified in the template.
*/ LogicalResourceId: string | undefined; + + /** + *A key-value pair that identifies the target resource. The key is an identifier property
+ * (for example, BucketName
for AWS::S3::Bucket
resources) and the
+ * value is the actual property value (for example, MyS3Bucket
).
- * Required. A string containing the value for this tag. You can specify - * a maximum of 256 characters for a tag value.
- */ - Value: string | undefined; - /** *
* Required. A string used to identify this tag. You can specify a
@@ -857,6 +886,13 @@ export interface Tag {
* reserved prefix: aws:
.
+ * Required. A string containing the value for this tag. You can specify + * a maximum of 256 characters for a tag value.
+ */ + Value: string | undefined; } export namespace Tag { @@ -870,32 +906,31 @@ export namespace Tag { */ export interface CreateChangeSetInput { /** - *The rollback triggers for AWS CloudFormation to monitor during stack creation and - * updating operations, and for the specified monitoring period afterwards.
- */ - RollbackConfiguration?: RollbackConfiguration; - - /** - *The name of the change set. The name must be unique among all change sets that are - * associated with the specified stack.
- *A change set name can contain only alphanumeric, case sensitive characters and - * hyphens. It must start with an alphabetic character and cannot exceed 128 - * characters.
+ *The name or the unique ID of the stack for which you are creating a change set. AWS + * CloudFormation generates the change set by comparing this stack's information with the + * information that you submit, such as a modified template or different parameter input + * values.
*/ - ChangeSetName: string | undefined; + StackName: string | undefined; /** - *Key-value pairs to associate with this stack. AWS CloudFormation also propagates - * these tags to resources in the stack. You can specify a maximum of 50 tags.
+ *A structure that contains the body of the revised template, with a minimum length of + * 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation generates the change set by + * comparing this template with the template of the stack that you specified.
+ *Conditional: You must specify only TemplateBody
or
+ * TemplateURL
.
The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) - * topics that AWS CloudFormation associates with the stack. To remove all associated - * notification topics, specify an empty list.
+ *The location of the file that contains the revised template. The URL must point to a + * template (max size: 460,800 bytes) that is located in an S3 bucket. AWS CloudFormation + * generates the change set by comparing this template with the stack that you + * specified.
+ *Conditional: You must specify only TemplateBody
or
+ * TemplateURL
.
Whether to reuse the template that is associated with the stack to create the change @@ -903,76 +938,12 @@ export interface CreateChangeSetInput { */ UsePreviousTemplate?: boolean; - /** - *
A description to help you identify this change set.
- */ - Description?: string; - /** *A list of Parameter
structures that specify input parameters for the
* change set. For more information, see the Parameter data type.
The template resource types that you have permissions to work with if you execute
- * this change set, such as AWS::EC2::Instance
, AWS::EC2::*
, or
- * Custom::MyCustomInstance
.
If the list of resource types doesn't include a resource type that you're updating, - * the stack update fails. By default, AWS CloudFormation grants permissions to all resource - * types. AWS Identity and Access Management (IAM) uses this parameter for condition keys in - * IAM policies for AWS CloudFormation. For more information, see Controlling Access with - * AWS Identity and Access Management in the AWS CloudFormation User - * Guide.
- */ - ResourceTypes?: string[]; - - /** - *A unique identifier for this CreateChangeSet
request. Specify this token
- * if you plan to retry requests so that AWS CloudFormation knows that you're not attempting
- * to create another change set with the same name. You might retry
- * CreateChangeSet
requests to ensure that AWS CloudFormation successfully
- * received them.
The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role - * that AWS CloudFormation assumes when executing the change set. AWS CloudFormation uses the - * role's credentials to make calls on your behalf. AWS CloudFormation uses this role for all - * future operations on the stack. As long as users have permission to operate on the stack, - * AWS CloudFormation uses this role even if the users don't have permission to pass it. - * Ensure that the role grants least privilege.
- *If you don't specify a value, AWS CloudFormation uses the role that was previously - * associated with the stack. If no role is available, AWS CloudFormation uses a temporary - * session that is generated from your user credentials.
- */ - RoleARN?: string; - - /** - *The type of change set operation. To create a change set for a new stack, specify
- * CREATE
. To create a change set for an existing stack, specify
- * UPDATE
. To create a change set for an import operation, specify
- * IMPORT
.
If you create a change set for a new stack, AWS Cloudformation creates a stack with a
- * unique stack ID, but no template or resources. The stack will be in the
- * REVIEW_IN_PROGRESS
- * state until you execute the change
- * set.
By default, AWS CloudFormation specifies UPDATE
. You can't use the
- * UPDATE
type to create a change set for a new stack or the
- * CREATE
type to create a change set for an existing stack.
The name or the unique ID of the stack for which you are creating a change set. AWS - * CloudFormation generates the change set by comparing this stack's information with the - * information that you submit, such as a modified template or different parameter input - * values.
- */ - StackName: string | undefined; - /** *In some cases, you must explicitly acknowledge that your stack template contains certain * capabilities in order for AWS CloudFormation to create the stack.
@@ -1078,63 +1049,128 @@ export interface CreateChangeSetInput { Capabilities?: (Capability | string)[]; /** - *A structure that contains the body of the revised template, with a minimum length of - * 1 byte and a maximum length of 51,200 bytes. AWS CloudFormation generates the change set by - * comparing this template with the template of the stack that you specified.
- *Conditional: You must specify only TemplateBody
or
- * TemplateURL
.
The template resource types that you have permissions to work with if you execute
+ * this change set, such as AWS::EC2::Instance
, AWS::EC2::*
, or
+ * Custom::MyCustomInstance
.
If the list of resource types doesn't include a resource type that you're updating, + * the stack update fails. By default, AWS CloudFormation grants permissions to all resource + * types. AWS Identity and Access Management (IAM) uses this parameter for condition keys in + * IAM policies for AWS CloudFormation. For more information, see Controlling Access with + * AWS Identity and Access Management in the AWS CloudFormation User + * Guide.
*/ - TemplateBody?: string; + ResourceTypes?: string[]; /** - *The location of the file that contains the revised template. The URL must point to a - * template (max size: 460,800 bytes) that is located in an S3 bucket. AWS CloudFormation - * generates the change set by comparing this template with the stack that you - * specified.
- *Conditional: You must specify only TemplateBody
or
- * TemplateURL
.
The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role + * that AWS CloudFormation assumes when executing the change set. AWS CloudFormation uses the + * role's credentials to make calls on your behalf. AWS CloudFormation uses this role for all + * future operations on the stack. As long as users have permission to operate on the stack, + * AWS CloudFormation uses this role even if the users don't have permission to pass it. + * Ensure that the role grants least privilege.
+ *If you don't specify a value, AWS CloudFormation uses the role that was previously + * associated with the stack. If no role is available, AWS CloudFormation uses a temporary + * session that is generated from your user credentials.
*/ - TemplateURL?: string; + RoleARN?: string; /** - *Creates a change set for the all nested stacks specified in the template. The default
- * behavior of this action is set to False
. To include nested sets in a change
- * set, specify True
.
The rollback triggers for AWS CloudFormation to monitor during stack creation and + * updating operations, and for the specified monitoring period afterwards.
*/ - IncludeNestedStacks?: boolean; + RollbackConfiguration?: RollbackConfiguration; /** - *The resources to import into your stack.
+ *The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) + * topics that AWS CloudFormation associates with the stack. To remove all associated + * notification topics, specify an empty list.
*/ - ResourcesToImport?: ResourceToImport[]; -} + NotificationARNs?: string[]; -export namespace CreateChangeSetInput { - export const filterSensitiveLog = (obj: CreateChangeSetInput): any => ({ - ...obj, - }); -} + /** + *Key-value pairs to associate with this stack. AWS CloudFormation also propagates + * these tags to resources in the stack. You can specify a maximum of 50 tags.
+ */ + Tags?: Tag[]; -/** - *The output for the CreateChangeSet action.
- */ -export interface CreateChangeSetOutput { /** - *The unique ID of the stack.
+ *The name of the change set. The name must be unique among all change sets that are + * associated with the specified stack.
+ *A change set name can contain only alphanumeric, case sensitive characters and + * hyphens. It must start with an alphabetic character and cannot exceed 128 + * characters.
*/ - StackId?: string; + ChangeSetName: string | undefined; /** - *The Amazon Resource Name (ARN) of the change set.
+ *A unique identifier for this CreateChangeSet
request. Specify this token
+ * if you plan to retry requests so that AWS CloudFormation knows that you're not attempting
+ * to create another change set with the same name. You might retry
+ * CreateChangeSet
requests to ensure that AWS CloudFormation successfully
+ * received them.
A description to help you identify this change set.
+ */ + Description?: string; + + /** + *The type of change set operation. To create a change set for a new stack, specify
+ * CREATE
. To create a change set for an existing stack, specify
+ * UPDATE
. To create a change set for an import operation, specify
+ * IMPORT
.
If you create a change set for a new stack, AWS Cloudformation creates a stack with a
+ * unique stack ID, but no template or resources. The stack will be in the
+ * REVIEW_IN_PROGRESS
+ * state until you execute the change
+ * set.
By default, AWS CloudFormation specifies UPDATE
. You can't use the
+ * UPDATE
type to create a change set for a new stack or the
+ * CREATE
type to create a change set for an existing stack.
The resources to import into your stack.
+ */ + ResourcesToImport?: ResourceToImport[]; + + /** + *Creates a change set for the all nested stacks specified in the template. The default
+ * behavior of this action is set to False
. To include nested sets in a change
+ * set, specify True
.
The output for the CreateChangeSet action.
+ */ +export interface CreateChangeSetOutput { + /** + *The Amazon Resource Name (ARN) of the change set.
+ */ + Id?: string; + + /** + *The unique ID of the stack.
+ */ + StackId?: string; +} + +export namespace CreateChangeSetOutput { + export const filterSensitiveLog = (obj: CreateChangeSetOutput): any => ({ + ...obj, + }); +} /** *The template contains resources with capabilities that weren't specified in the @@ -1179,6 +1215,17 @@ export enum OnFailure { *
The input for CreateStack action.
*/ export interface CreateStackInput { + /** + *The name that is associated with the stack. The name must be unique in the Region in + * which you are creating the stack.
+ *A stack name can contain only alphanumeric characters (case sensitive) and + * hyphens. It must start with an alphabetic character and cannot be longer than 128 + * characters.
+ *Structure containing the template body with a minimum length of 1 byte and a maximum * length of 51,200 bytes. For more information, go to Template Anatomy @@ -1189,30 +1236,14 @@ export interface CreateStackInput { TemplateBody?: string; /** - *
Structure containing the stack policy body. For more information, go to Prevent Updates
- * to Stack Resources in the AWS CloudFormation User Guide.
- * You can specify either the StackPolicyBody
or the StackPolicyURL
- * parameter, but not both.
The template resource types that you have permissions to work with for this create
- * stack action, such as AWS::EC2::Instance
, AWS::EC2::*
, or
- * Custom::MyCustomInstance
. Use the following syntax to describe template
- * resource types: AWS::*
(for all AWS resource), Custom::*
(for all
- * custom resources), Custom::logical_ID
- *
(for a specific custom resource),
- * AWS::service_name::*
(for all resources of a
- * particular AWS service), and
- * AWS::service_name::resource_logical_ID
- *
(for a specific AWS resource).
If the list of resource types doesn't include a resource that you're creating, the - * stack creation fails. By default, AWS CloudFormation grants permissions to all resource - * types. AWS Identity and Access Management (IAM) uses this parameter for AWS - * CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.
+ *Location of file containing the template body. The URL must point to a template (max + * size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to + * the Template Anatomy + * in the AWS CloudFormation User Guide.
+ *Conditional: You must specify either the TemplateBody
or the
+ * TemplateURL
parameter, but not both.
A list of Parameter
structures that specify input parameters for the
@@ -1221,27 +1252,6 @@ export interface CreateStackInput {
*/
Parameters?: Parameter[];
- /**
- *
Whether to enable termination protection on the specified stack. If a user attempts - * to delete a stack with termination protection enabled, the operation fails and the stack - * remains unchanged. For more information, see Protecting a Stack From Being - * Deleted in the AWS CloudFormation User Guide. Termination protection is - * disabled on stacks by default.
- *For nested stacks, - * termination protection is set on the root stack and cannot be changed directly on the - * nested stack.
- */ - EnableTerminationProtection?: boolean; - - /** - *Determines what action will be taken if stack creation fails. This must be one of:
- * DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure
or
- * DisableRollback
, but not both.
Default: ROLLBACK
- *
Set to true
to disable rollback of the stack if stack creation failed.
* You can specify either DisableRollback
or OnFailure
, but not
@@ -1252,22 +1262,10 @@ export interface CreateStackInput {
DisableRollback?: boolean;
/**
- *
Key-value pairs to associate with this stack. AWS CloudFormation also propagates - * these tags to the resources created in the stack. A maximum number of 50 tags can be - * specified.
- */ - Tags?: Tag[]; - - /** - *The name that is associated with the stack. The name must be unique in the Region in - * which you are creating the stack.
- *A stack name can contain only alphanumeric characters (case sensitive) and - * hyphens. It must start with an alphabetic character and cannot be longer than 128 - * characters.
- *The rollback triggers for AWS CloudFormation to monitor during stack creation and + * updating operations, and for the specified monitoring period afterwards.
*/ - StackName: string | undefined; + RollbackConfiguration?: RollbackConfiguration; /** *The amount of time that can pass before the stack status becomes CREATE_FAILED; if @@ -1276,31 +1274,6 @@ export interface CreateStackInput { */ TimeoutInMinutes?: number; - /** - *
The rollback triggers for AWS CloudFormation to monitor during stack creation and - * updating operations, and for the specified monitoring period afterwards.
- */ - RollbackConfiguration?: RollbackConfiguration; - - /** - *A unique identifier for this CreateStack
request. Specify this token if
- * you plan to retry requests so that AWS CloudFormation knows that you're not attempting to
- * create a stack with the same name. You might retry CreateStack
requests to
- * ensure that AWS CloudFormation successfully received them.
All events triggered by a given stack operation are assigned the same client request
- * token, which you can use to track operations. For example, if you execute a
- * CreateStack
operation with the token token1
, then all the
- * StackEvents
generated by that operation will have
- * ClientRequestToken
set as token1
.
In the console, stack operations display the client request token on the Events tab.
- * Stack operations that are initiated from the console use the token format
- * Console-StackOperation-ID, which helps you easily identify the
- * stack operation . For example, if you create a stack using the console, each stack event
- * would be assigned the same token in the following format:
- * Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002
.
The Simple Notification Service (SNS) topic ARNs to publish stack related events. You * can find your SNS topic ARNs using the SNS console or your Command Line Interface @@ -1415,22 +1388,22 @@ export interface CreateStackInput { Capabilities?: (Capability | string)[]; /** - *
Location of file containing the template body. The URL must point to a template (max - * size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to - * the Template Anatomy - * in the AWS CloudFormation User Guide.
- *Conditional: You must specify either the TemplateBody
or the
- * TemplateURL
parameter, but not both.
Location of a file containing the stack policy. The URL must point to a policy
- * (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. You can
- * specify either the StackPolicyBody
or the StackPolicyURL
- * parameter, but not both.
The template resource types that you have permissions to work with for this create
+ * stack action, such as AWS::EC2::Instance
, AWS::EC2::*
, or
+ * Custom::MyCustomInstance
. Use the following syntax to describe template
+ * resource types: AWS::*
(for all AWS resource), Custom::*
(for all
+ * custom resources), Custom::logical_ID
+ *
(for a specific custom resource),
+ * AWS::service_name::*
(for all resources of a
+ * particular AWS service), and
+ * AWS::service_name::resource_logical_ID
+ *
(for a specific AWS resource).
If the list of resource types doesn't include a resource that you're creating, the + * stack creation fails. By default, AWS CloudFormation grants permissions to all resource + * types. AWS Identity and Access Management (IAM) uses this parameter for AWS + * CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.
*/ - StackPolicyURL?: string; + ResourceTypes?: string[]; /** *The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role @@ -1444,6 +1417,69 @@ export interface CreateStackInput { * session that is generated from your user credentials.
*/ RoleARN?: string; + + /** + *Determines what action will be taken if stack creation fails. This must be one of:
+ * DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure
or
+ * DisableRollback
, but not both.
Default: ROLLBACK
+ *
Structure containing the stack policy body. For more information, go to Prevent Updates
+ * to Stack Resources in the AWS CloudFormation User Guide.
+ * You can specify either the StackPolicyBody
or the StackPolicyURL
+ * parameter, but not both.
Location of a file containing the stack policy. The URL must point to a policy
+ * (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. You can
+ * specify either the StackPolicyBody
or the StackPolicyURL
+ * parameter, but not both.
Key-value pairs to associate with this stack. AWS CloudFormation also propagates + * these tags to the resources created in the stack. A maximum number of 50 tags can be + * specified.
+ */ + Tags?: Tag[]; + + /** + *A unique identifier for this CreateStack
request. Specify this token if
+ * you plan to retry requests so that AWS CloudFormation knows that you're not attempting to
+ * create a stack with the same name. You might retry CreateStack
requests to
+ * ensure that AWS CloudFormation successfully received them.
All events triggered by a given stack operation are assigned the same client request
+ * token, which you can use to track operations. For example, if you execute a
+ * CreateStack
operation with the token token1
, then all the
+ * StackEvents
generated by that operation will have
+ * ClientRequestToken
set as token1
.
In the console, stack operations display the client request token on the Events tab.
+ * Stack operations that are initiated from the console use the token format
+ * Console-StackOperation-ID, which helps you easily identify the
+ * stack operation . For example, if you create a stack using the console, each stack event
+ * would be assigned the same token in the following format:
+ * Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002
.
Whether to enable termination protection on the specified stack. If a user attempts + * to delete a stack with termination protection enabled, the operation fails and the stack + * remains unchanged. For more information, see Protecting a Stack From Being + * Deleted in the AWS CloudFormation User Guide. Termination protection is + * disabled on stacks by default.
+ *For nested stacks, + * termination protection is set on the root stack and cannot be changed directly on the + * nested stack.
+ */ + EnableTerminationProtection?: boolean; } export namespace CreateStackInput { @@ -1469,19 +1505,19 @@ export namespace CreateStackOutput { } /** - *[Service-managed
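A minimal usage sketch for the CreateStackInput fields reordered above, assuming the usual CloudFormationClient and CreateStackCommand exports; the stack name, template URL, token, and tag values are placeholders.

import { CloudFormationClient, CreateStackCommand } from "@aws-sdk/client-cloudformation";

const cfn = new CloudFormationClient({});

async function createStack() {
  // OnFailure and DisableRollback are mutually exclusive, as are TemplateBody/TemplateURL
  // and StackPolicyBody/StackPolicyURL.
  await cfn.send(
    new CreateStackCommand({
      StackName: "my-stack",                                        // hypothetical
      TemplateURL: "https://s3.amazonaws.com/my-bucket/stack.yaml",  // hypothetical
      OnFailure: "ROLLBACK",                                         // the default, spelled out here
      EnableTerminationProtection: true,
      ClientRequestToken: "create-stack-token-1",                    // shared by every StackEvent of this operation
      Tags: [{ Key: "team", Value: "platform" }],                    // up to 50 tags, propagated to stack resources
    })
  );
}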
permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization master account, even if the master account is in your organization or in an OU in your organization.
[Service-managed
permissions] The AWS Organizations accounts to which StackSets deploys. StackSets does not deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization.
For update operations, you can specify either Accounts
or OrganizationalUnitIds
. For create and delete operations, specify OrganizationalUnitIds
.
The organization root ID or organizational unit (OU) IDs to which StackSets deploys.
+ *The names of one or more AWS accounts for which you want to deploy stack set updates.
*/ - OrganizationalUnitIds?: string[]; + Accounts?: string[]; /** - *The names of one or more AWS accounts for which you want to deploy stack set updates.
+ *The organization root ID or organizational unit (OU) IDs to which StackSets deploys.
*/ - Accounts?: string[]; + OrganizationalUnitIds?: string[]; } export namespace DeploymentTargets { @@ -1501,21 +1537,6 @@ export interface StackSetOperationPreferences { */ RegionOrder?: string[]; - /** - *The maximum percentage of accounts in which to perform this operation at one - * time.
- *When calculating the number of accounts based on the specified percentage, AWS - * CloudFormation rounds down to the next whole number. This is true except in cases where - * rounding down would result is zero. In this case, CloudFormation sets the number as one - * instead.
- *Note that this setting lets you specify the maximum for - * operations. For large deployments, under certain circumstances the actual number of - * accounts acted upon concurrently may be lower due to service throttling.
- *Conditional: You must specify either MaxConcurrentCount
or
- * MaxConcurrentPercentage
, but not both.
The number of accounts, per Region, for which this operation can fail before AWS
* CloudFormation stops the operation in that Region. If the operation is stopped in a Region,
@@ -1549,6 +1570,21 @@ export interface StackSetOperationPreferences {
* MaxConcurrentPercentage
, but not both.
The maximum percentage of accounts in which to perform this operation at one + * time.
+ *When calculating the number of accounts based on the specified percentage, AWS + * CloudFormation rounds down to the next whole number. This is true except in cases where + * rounding down would result is zero. In this case, CloudFormation sets the number as one + * instead.
+ *Note that this setting lets you specify the maximum for + * operations. For large deployments, under certain circumstances the actual number of + * accounts acted upon concurrently may be lower due to service throttling.
+ *Conditional: You must specify either MaxConcurrentCount
or
+ * MaxConcurrentPercentage
, but not both.
The name or unique ID of the stack set that you want to create stack instances + * from.
+ */ + StackSetName: string | undefined; + + /** + *[Self-managed
permissions] The names of one or more AWS accounts that you want to create stack instances in the
+ * specified Region(s) for.
You can specify Accounts
or DeploymentTargets
, but not both.
[Service-managed
permissions] The AWS Organizations accounts for which to create stack instances in the specified Regions.
You can specify Accounts
or DeploymentTargets
, but not both.
The names of one or more Regions where you want to create stack instances using the * specified AWS account(s).
@@ -1610,13 +1665,6 @@ export interface CreateStackInstancesInput { */ OperationPreferences?: StackSetOperationPreferences; - /** - *[Self-managed
permissions] The names of one or more AWS accounts that you want to create stack instances in the
- * specified Region(s) for.
You can specify Accounts
or DeploymentTargets
, but not both.
The unique identifier for this stack set operation.
*The operation ID also functions as an idempotency token, to ensure that AWS
@@ -1628,18 +1676,6 @@ export interface CreateStackInstancesInput {
* instances whose status is OUTDATED
.
The name or unique ID of the stack set that you want to create stack instances - * from.
- */ - StackSetName: string | undefined; - - /** - *[Service-managed
permissions] The AWS Organizations accounts for which to create stack instances in the specified Regions.
You can specify Accounts
or DeploymentTargets
, but not both.
The structure that contains the template body, with a minimum length of 1 byte and a - * maximum length of 51,200 bytes. For more information, see Template Anatomy - * in the AWS CloudFormation User Guide.
- *Conditional: You must specify either the TemplateBody or the TemplateURL parameter, - * but not both.
+ *The name to associate with the stack set. The name must be unique in the Region where + * you create your stack set.
+ *A stack name can contain only alphanumeric characters (case-sensitive) and + * hyphens. It must start with an alphabetic character and can't be longer than 128 + * characters.
+ *The key-value pairs to associate with this stack set and the stacks created from it. - * AWS CloudFormation also propagates these tags to supported resources that are created in - * the stacks. A maximum number of 50 tags can be specified.
- *If you specify tags as part of a CreateStackSet
action, AWS
- * CloudFormation checks to see if you have the required IAM permission to tag resources. If
- * you don't, the entire CreateStackSet
action fails with an access
- * denied
error, and the stack set is not created.
A description of the stack set. You can use the description to identify the stack + * set's purpose or other important information.
+ */ + Description?: string; + + /** + *The structure that contains the template body, with a minimum length of 1 byte and a + * maximum length of 51,200 bytes. For more information, see Template Anatomy + * in the AWS CloudFormation User Guide.
+ *Conditional: You must specify either the TemplateBody or the TemplateURL parameter, + * but not both.
*/ - Tags?: Tag[]; + TemplateBody?: string; /** *The location of the file that contains the template body. The URL must point to a @@ -1791,19 +1833,6 @@ export interface CreateStackSetInput { */ Parameters?: Parameter[]; - /** - *
Describes how the IAM roles required for stack set operations are created. By default, SELF-MANAGED
is specified.
With self-managed
permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see Grant Self-Managed Stack Set Permissions.
With service-managed
permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations. For more information, see Grant Service-Managed Stack Set Permissions.
In some cases, you must explicitly acknowledge that your stack set template contains * certain capabilities in order for AWS CloudFormation to create the stack set and related stack @@ -1903,15 +1932,25 @@ export interface CreateStackSetInput { Capabilities?: (Capability | string)[]; /** - *
A description of the stack set. You can use the description to identify the stack - * set's purpose or other important information.
+ *The key-value pairs to associate with this stack set and the stacks created from it. + * AWS CloudFormation also propagates these tags to supported resources that are created in + * the stacks. A maximum number of 50 tags can be specified.
+ *If you specify tags as part of a CreateStackSet
action, AWS
+ * CloudFormation checks to see if you have the required IAM permission to tag resources. If
+ * you don't, the entire CreateStackSet
action fails with an access
+ * denied
error, and the stack set is not created.
Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to the target organization or organizational unit (OU). Specify only if PermissionModel
is SERVICE_MANAGED
.
The Amazon Resource Number (ARN) of the IAM role to use to create this stack set.
+ *Specify an IAM role only if you are using customized administrator roles to control + * which users or groups can manage specific stack sets within the same administrator account. + * For more information, see Prerequisites: + * Granting Permissions for Stack Set Operations in the + * AWS CloudFormation User Guide.
*/ - AutoDeployment?: AutoDeployment; + AdministrationRoleARN?: string; /** *The name of the IAM execution role to use to create the stack set. If you do not specify @@ -1924,6 +1963,24 @@ export interface CreateStackSetInput { */ ExecutionRoleName?: string; + /** + *
Describes how the IAM roles required for stack set operations are created. By default, SELF-MANAGED
is specified.
With self-managed
permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see Grant Self-Managed Stack Set Permissions.
With service-managed
permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations. For more information, see Grant Service-Managed Stack Set Permissions.
Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to the target organization or organizational unit (OU). Specify only if PermissionModel
is SERVICE_MANAGED
.
A unique identifier for this CreateStackSet
request. Specify this token
* if you plan to retry requests so that AWS CloudFormation knows that you're not attempting
@@ -1933,27 +1990,6 @@ export interface CreateStackSetInput {
*
The Amazon Resource Number (ARN) of the IAM role to use to create this stack set.
- *Specify an IAM role only if you are using customized administrator roles to control - * which users or groups can manage specific stack sets within the same administrator account. - * For more information, see Prerequisites: - * Granting Permissions for Stack Set Operations in the - * AWS CloudFormation User Guide.
- */ - AdministrationRoleARN?: string; - - /** - *The name to associate with the stack set. The name must be unique in the Region where - * you create your stack set.
- *A stack name can contain only alphanumeric characters (case-sensitive) and - * hyphens. It must start with an alphabetic character and can't be longer than 128 - * characters.
- *The input for the DeleteChangeSet action.
*/ export interface DeleteChangeSetInput { - /** - *If you specified the name of a change set to delete, specify the stack name or ID - * (ARN) that is associated with it.
- */ - StackName?: string; - /** *The name or Amazon Resource Name (ARN) of the change set that you want to * delete.
*/ ChangeSetName: string | undefined; + + /** + *If you specified the name of a change set to delete, specify the stack name or ID + * (ARN) that is associated with it.
+ */ + StackName?: string; } export namespace DeleteChangeSetInput { @@ -2045,6 +2081,11 @@ export namespace InvalidChangeSetStatusException { *The input for DeleteStack action.
*/ export interface DeleteStackInput { + /** + *The name or the unique stack ID that is associated with the stack.
+ */ + StackName: string | undefined; + /** *For stacks in the DELETE_FAILED
state, a list of resource logical IDs
* that are associated with the resources you want to retain. During deletion, AWS
@@ -2082,11 +2123,6 @@ export interface DeleteStackInput {
* Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002
.
The name or the unique stack ID that is associated with the stack.
- */ - StackName: string | undefined; } export namespace DeleteStackInput { @@ -2097,32 +2133,32 @@ export namespace DeleteStackInput { export interface DeleteStackInstancesInput { /** - *The Regions where you want to delete stack set instances.
+ *The name or unique ID of the stack set that you want to delete stack instances + * for.
*/ - Regions: string[] | undefined; + StackSetName: string | undefined; /** - *The unique identifier for this stack set operation.
- *If you don't specify an operation ID, the SDK generates one automatically.
- *The operation ID also functions as an idempotency token, to ensure that AWS - * CloudFormation performs the stack set operation only once, even if you retry the request - * multiple times. You can retry stack set operation requests to ensure that AWS - * CloudFormation successfully received them.
- *Repeating this stack set operation with a new operation ID retries all stack
- * instances whose status is OUTDATED
.
[Self-managed
permissions] The names of the AWS accounts that you want to delete stack instances for.
You can specify Accounts
or DeploymentTargets
, but not both.
Preferences for how AWS CloudFormation performs this stack set operation.
+ *[Service-managed
permissions] The AWS Organizations accounts from which to delete stack instances.
You can specify Accounts
or DeploymentTargets
, but not both.
[Self-managed
permissions] The names of the AWS accounts that you want to delete stack instances for.
You can specify Accounts
or DeploymentTargets
, but not both.
The Regions where you want to delete stack set instances.
*/ - Accounts?: string[]; + Regions: string[] | undefined; + + /** + *Preferences for how AWS CloudFormation performs this stack set operation.
+ */ + OperationPreferences?: StackSetOperationPreferences; /** *Removes the stack instances from the specified stack set, but doesn't delete the @@ -2133,16 +2169,16 @@ export interface DeleteStackInstancesInput { RetainStacks: boolean | undefined; /** - *
The name or unique ID of the stack set that you want to delete stack instances - * for.
- */ - StackSetName: string | undefined; - - /** - *[Service-managed
permissions] The AWS Organizations accounts from which to delete stack instances.
You can specify Accounts
or DeploymentTargets
, but not both.
The unique identifier for this stack set operation.
+ *If you don't specify an operation ID, the SDK generates one automatically.
+ *The operation ID also functions as an idempotency token, to ensure that AWS + * CloudFormation performs the stack set operation only once, even if you retry the request + * multiple times. You can retry stack set operation requests to ensure that AWS + * CloudFormation successfully received them.
+ *Repeating this stack set operation with a new operation ID retries all stack
+ * instances whose status is OUTDATED
.
The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
+ *The Amazon Resource Name (ARN) of the type.
+ *Conditional: You must specify either TypeName
and Type
, or Arn
.
The kind of type.
@@ -2225,10 +2262,9 @@ export interface DeregisterTypeInput { TypeName?: string; /** - *The Amazon Resource Name (ARN) of the type.
- *Conditional: You must specify either TypeName
and Type
, or Arn
.
The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
*/ - Arn?: string; + VersionId?: string; } export namespace DeregisterTypeInput { @@ -2280,17 +2316,17 @@ export namespace DescribeAccountLimitsInput { *The output for the DescribeAccountLimits action.
*/ export interface DescribeAccountLimitsOutput { - /** - *If the output exceeds 1 MB in size, a string that identifies the next page of limits. - * If no additional page exists, this value is null.
- */ - NextToken?: string; - /** *An account limit structure that contain a list of AWS CloudFormation account limits * and their values.
*/ AccountLimits?: AccountLimit[]; + + /** + *If the output exceeds 1 MB in size, a string that identifies the next page of limits. + * If no additional page exists, this value is null.
+ */ + NextToken?: string; } export namespace DescribeAccountLimitsOutput { @@ -2309,17 +2345,17 @@ export interface DescribeChangeSetInput { */ ChangeSetName: string | undefined; - /** - *A string (provided by the DescribeChangeSet response output) that - * identifies the next page of information that you want to retrieve.
- */ - NextToken?: string; - /** *If you specified the name of a change set, specify the stack name or ID (ARN) of the * change set you want to describe.
*/ StackName?: string; + + /** + *A string (provided by the DescribeChangeSet response output) that + * identifies the next page of information that you want to retrieve.
+ */ + NextToken?: string; } export namespace DescribeChangeSetInput { @@ -2333,49 +2369,29 @@ export namespace DescribeChangeSetInput { */ export interface DescribeChangeSetOutput { /** - *A list of Change
structures that describes the resources AWS
- * CloudFormation changes if you execute the change set.
Information about the change set.
- */ - Description?: string; - - /** - *The current status of the change set, such as CREATE_IN_PROGRESS
,
- * CREATE_COMPLETE
, or FAILED
.
The ARNs of the Amazon Simple Notification Service (Amazon SNS) topics that will be - * associated with the stack if you execute the change set.
+ *The name of the change set.
*/ - NotificationARNs?: string[]; + ChangeSetName?: string; /** - *If the output exceeds 1 MB, a string that identifies the next page of changes. If - * there is no additional page, this value is null.
+ *The ARN of the change set.
*/ - NextToken?: string; + ChangeSetId?: string; /** - *Verifies if IncludeNestedStacks
is set to True
.
The ARN of the stack that is associated with the change set.
*/ - IncludeNestedStacks?: boolean; + StackId?: string; /** - *If you execute the change set, the list of capabilities that were explicitly - * acknowledged when the change set was created.
+ *The name of the stack that is associated with the change set.
*/ - Capabilities?: (Capability | string)[]; + StackName?: string; /** - *The name of the change set.
+ *Information about the change set.
*/ - ChangeSetName?: string; + Description?: string; /** *A list of Parameter
structures that describes the input parameters and
@@ -2384,10 +2400,9 @@ export interface DescribeChangeSetOutput {
Parameters?: Parameter[];
/**
- *
The rollback triggers for AWS CloudFormation to monitor during stack creation and - * updating operations, and for the specified monitoring period afterwards.
+ *The start time when the change set was created, in UTC.
*/ - RollbackConfiguration?: RollbackConfiguration; + CreationTime?: Date; /** *If the change set execution status is AVAILABLE
, you can execute the
@@ -2399,46 +2414,67 @@ export interface DescribeChangeSetOutput {
ExecutionStatus?: ExecutionStatus | string;
/**
- *
Specifies the change set ID of the parent change set in the current nested change set hierarchy.
+ *The current status of the change set, such as CREATE_IN_PROGRESS
,
+ * CREATE_COMPLETE
, or FAILED
.
The ARN of the change set.
+ *A description of the change set's status. For example, if your attempt to create a + * change set failed, AWS CloudFormation shows the error message.
*/ - ChangeSetId?: string; + StatusReason?: string; + + /** + *The ARNs of the Amazon Simple Notification Service (Amazon SNS) topics that will be + * associated with the stack if you execute the change set.
+ */ + NotificationARNs?: string[]; + + /** + *The rollback triggers for AWS CloudFormation to monitor during stack creation and + * updating operations, and for the specified monitoring period afterwards.
+ */ + RollbackConfiguration?: RollbackConfiguration; + + /** + *If you execute the change set, the list of capabilities that were explicitly + * acknowledged when the change set was created.
+ */ + Capabilities?: (Capability | string)[]; /** - *Specifies the change set ID of the root change set in the current nested change set hierarchy.
+ *If you execute the change set, the tags that will be associated with the + * stack.
*/ - RootChangeSetId?: string; + Tags?: Tag[]; /** - *The name of the stack that is associated with the change set.
+ *A list of Change
structures that describes the resources AWS
+ * CloudFormation changes if you execute the change set.
A description of the change set's status. For example, if your attempt to create a - * change set failed, AWS CloudFormation shows the error message.
+ *If the output exceeds 1 MB, a string that identifies the next page of changes. If + * there is no additional page, this value is null.
*/ - StatusReason?: string; + NextToken?: string; /** - *The ARN of the stack that is associated with the change set.
+ *Verifies if IncludeNestedStacks
is set to True
.
If you execute the change set, the tags that will be associated with the - * stack.
+ *Specifies the change set ID of the parent change set in the current nested change set hierarchy.
*/ - Tags?: Tag[]; + ParentChangeSetId?: string; /** - *The start time when the change set was created, in UTC.
+ *Specifies the change set ID of the root change set in the current nested change set hierarchy.
*/ - CreationTime?: Date; + RootChangeSetId?: string; } export namespace DescribeChangeSetOutput { @@ -2478,16 +2514,9 @@ export enum StackDriftStatus { export interface DescribeStackDriftDetectionStatusOutput { /** - *Total number of stack resources that have drifted. This is NULL until the drift
- * detection operation reaches a status of DETECTION_COMPLETE
. This value will be
- * 0 for stacks whose drift status is IN_SYNC
.
The reason the stack drift detection operation has its current status.
+ *The ID of the stack.
*/ - DetectionStatusReason?: string; + StackId: string | undefined; /** *The ID of the drift detection results of this operation.
@@ -2498,9 +2527,31 @@ export interface DescribeStackDriftDetectionStatusOutput { StackDriftDetectionId: string | undefined; /** - *Time at which the stack drift detection operation was initiated.
+ *Status of the stack's actual configuration compared to its expected configuration.
+ *
+ * DRIFTED
: The stack differs from its expected template
+ * configuration. A stack is considered to have drifted if one or more of its resources
+ * have drifted.
+ * NOT_CHECKED
: AWS CloudFormation has not checked if the stack differs from its
+ * expected template configuration.
+ * IN_SYNC
: The stack's actual configuration matches its expected
+ * template configuration.
+ * UNKNOWN
: This value is reserved for future use.
The status of the stack drift detection operation.
@@ -2530,36 +2581,21 @@ export interface DescribeStackDriftDetectionStatusOutput { DetectionStatus: StackDriftDetectionStatus | string | undefined; /** - *Status of the stack's actual configuration compared to its expected configuration.
- *
- * DRIFTED
: The stack differs from its expected template
- * configuration. A stack is considered to have drifted if one or more of its resources
- * have drifted.
- * NOT_CHECKED
: AWS CloudFormation has not checked if the stack differs from its
- * expected template configuration.
- * IN_SYNC
: The stack's actual configuration matches its expected
- * template configuration.
- * UNKNOWN
: This value is reserved for future use.
The reason the stack drift detection operation has its current status.
*/ - StackDriftStatus?: StackDriftStatus | string; + DetectionStatusReason?: string; /** - *The ID of the stack.
+ *Total number of stack resources that have drifted. This is NULL until the drift
+ * detection operation reaches a status of DETECTION_COMPLETE
. This value will be
+ * 0 for stacks whose drift status is IN_SYNC
.
Time at which the stack drift detection operation was initiated.
+ */ + Timestamp: Date | undefined; } export namespace DescribeStackDriftDetectionStatusOutput { @@ -2624,30 +2660,24 @@ export enum ResourceStatus { */ export interface StackEvent { /** - *Time the status was updated.
+ *The unique ID name of the instance of the stack.
*/ - Timestamp: Date | undefined; + StackId: string | undefined; /** - *Success/failure message associated with the resource.
+ *The unique ID of this event.
*/ - ResourceStatusReason?: string; + EventId: string | undefined; /** - *The token passed to the operation that generated this event.
- *All events triggered by a given stack operation are assigned the same client request
- * token, which you can use to track operations. For example, if you execute a
- * CreateStack
operation with the token token1
, then all the
- * StackEvents
generated by that operation will have
- * ClientRequestToken
set as token1
.
In the console, stack operations display the client request token on the Events tab.
- * Stack operations that are initiated from the console use the token format
- * Console-StackOperation-ID, which helps you easily identify the
- * stack operation . For example, if you create a stack using the console, each stack event
- * would be assigned the same token in the following format:
- * Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002
.
The name associated with a stack.
*/ - ClientRequestToken?: string; + StackName: string | undefined; + + /** + *The logical name of the resource specified in the template.
+ */ + LogicalResourceId?: string; /** *The name or unique identifier associated with the physical instance of the @@ -2656,14 +2686,15 @@ export interface StackEvent { PhysicalResourceId?: string; /** - *
BLOB of the properties used to create the resource.
+ *Type of resource. (For more information, go to AWS + * Resource Types Reference in the AWS CloudFormation User Guide.)
*/ - ResourceProperties?: string; + ResourceType?: string; /** - *The name associated with a stack.
+ *Time the status was updated.
*/ - StackName: string | undefined; + Timestamp: Date | undefined; /** *Current status of the resource.
@@ -2671,25 +2702,30 @@ export interface StackEvent { ResourceStatus?: ResourceStatus | string; /** - *The logical name of the resource specified in the template.
- */ - LogicalResourceId?: string; - - /** - *Type of resource. (For more information, go to AWS - * Resource Types Reference in the AWS CloudFormation User Guide.)
+ *Success/failure message associated with the resource.
*/ - ResourceType?: string; + ResourceStatusReason?: string; /** - *The unique ID of this event.
+ *BLOB of the properties used to create the resource.
*/ - EventId: string | undefined; + ResourceProperties?: string; /** - *The unique ID name of the instance of the stack.
+ *The token passed to the operation that generated this event.
+ *All events triggered by a given stack operation are assigned the same client request
+ * token, which you can use to track operations. For example, if you execute a
+ * CreateStack
operation with the token token1
, then all the
+ * StackEvents
generated by that operation will have
+ * ClientRequestToken
set as token1
.
In the console, stack operations display the client request token on the Events tab.
+ * Stack operations that are initiated from the console use the token format
+ * Console-StackOperation-ID, which helps you easily identify the
+ * stack operation . For example, if you create a stack using the console, each stack event
+ * would be assigned the same token in the following format:
+ * Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002
.
If the output exceeds 1 MB in size, a string that identifies the next page of events. - * If no additional page exists, this value is null.
+ *A list of StackEvents
structures.
A list of StackEvents
structures.
If the output exceeds 1 MB in size, a string that identifies the next page of events. + * If no additional page exists, this value is null.
*/ - StackEvents?: StackEvent[]; + NextToken?: string; } export namespace DescribeStackEventsOutput { @@ -2721,11 +2757,6 @@ export namespace DescribeStackEventsOutput { } export interface DescribeStackInstanceInput { - /** - *The name of a Region that's associated with this stack instance.
- */ - StackInstanceRegion: string | undefined; - /** *The name or the unique stack ID of the stack set that you want to get stack instance * information for.
@@ -2736,6 +2767,11 @@ export interface DescribeStackInstanceInput { *The ID of an AWS account that's associated with this stack instance.
*/ StackInstanceAccount: string | undefined; + + /** + *The name of a Region that's associated with this stack instance.
+ */ + StackInstanceRegion: string | undefined; } export namespace DescribeStackInstanceInput { @@ -2801,22 +2837,32 @@ export type StackInstanceStatus = "CURRENT" | "INOPERABLE" | "OUTDATED"; */ export interface StackInstance { /** - *A list of parameters from the stack set template whose values have been overridden in - * this stack instance.
+ *The name or unique ID of the stack set that the stack instance is associated + * with.
*/ - ParameterOverrides?: Parameter[]; + StackSetId?: string; /** - *The explanation for the specific status code that is assigned to this stack - * instance.
+ *The name of the AWS Region that the stack instance is associated with.
*/ - StatusReason?: string; + Region?: string; + + /** + *[Self-managed
permissions] The name of the AWS account that the stack instance is associated with.
The ID of the stack instance.
*/ StackId?: string; + /** + *A list of parameters from the stack set template whose values have been overridden in + * this stack instance.
+ */ + ParameterOverrides?: Parameter[]; + /** *The status of the stack instance, in terms of its synchronization with its associated * stack set.
@@ -2856,14 +2902,15 @@ export interface StackInstance { Status?: StackInstanceStatus | string; /** - *The name of the AWS Region that the stack instance is associated with.
+ *The detailed status of the stack instance.
*/ - Region?: string; + StackInstanceStatus?: StackInstanceComprehensiveStatus; /** - *The detailed status of the stack instance.
+ *The explanation for the specific status code that is assigned to this stack + * instance.
*/ - StackInstanceStatus?: StackInstanceComprehensiveStatus; + StatusReason?: string; /** *[Service-managed
permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
The name or unique ID of the stack set that the stack instance is associated - * with.
- */ - StackSetId?: string; - - /** - *[Self-managed
permissions] The name of the AWS account that the stack instance is associated with.
Most recent time when CloudFormation performed a drift detection operation on the stack
* instance. This value will be NULL
for any stack instance on which drift
@@ -2997,12 +3033,6 @@ export enum StackResourceDriftStatus {
* has drifted, from its expected configuration.
When AWS CloudFormation last checked if the resource had drifted from its expected - * configuration.
- */ - LastCheckTimestamp?: Date; - /** *Status of the resource's actual configuration compared to its expected * configuration
@@ -3032,6 +3062,12 @@ export interface StackResourceDriftInformation { * */ StackResourceDriftStatus: StackResourceDriftStatus | string | undefined; + + /** + *When AWS CloudFormation last checked if the resource had drifted from its expected + * configuration.
+ */ + LastCheckTimestamp?: Date; } export namespace StackResourceDriftInformation { @@ -3040,19 +3076,30 @@ export namespace StackResourceDriftInformation { }); } -/** - *Contains detailed information about the specified stack resource.
- */ -export interface StackResourceDetail { +/** + *Contains detailed information about the specified stack resource.
+ */ +export interface StackResourceDetail { + /** + *The name associated with the stack.
+ */ + StackName?: string; + + /** + *Unique identifier of the stack.
+ */ + StackId?: string; + /** - *User defined description associated with the resource.
+ *The logical name of the resource specified in the template.
*/ - Description?: string; + LogicalResourceId: string | undefined; /** - *Time the status was updated.
+ *The name or unique identifier that corresponds to a physical instance ID of a + * resource supported by AWS CloudFormation.
*/ - LastUpdatedTimestamp: Date | undefined; + PhysicalResourceId?: string; /** *Type of resource. ((For more information, go to AWS
@@ -3061,15 +3108,25 @@ export interface StackResourceDetail {
ResourceType: string | undefined;
/**
- * Success/failure message associated with the resource. Time the status was updated. Current status of the resource. Success/failure message associated with the resource. User defined description associated with the resource. The content of the The name or unique identifier that corresponds to a physical instance ID of a
- * resource supported by AWS CloudFormation. Information about whether the resource's actual configuration differs, or has
* drifted, from its expected configuration, as defined in the stack
@@ -3092,19 +3143,9 @@ export interface StackResourceDetail {
DriftInformation?: StackResourceDriftInformation;
/**
- * The logical name of the resource specified in the template. Unique identifier of the stack. The name associated with the stack. Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template. A string that identifies the next page of stack resource drift results. The resource drift status values to use as filters for the resource drift results
* returned. A string that identifies the next page of stack resource drift results. The maximum number of results to be returned with a single call. If the number of
* available results exceeds this maximum, the response includes a The expected property value of the resource property, as defined in the stack
+ * template and any values specified as template parameters. The actual property value of the resource property. The type of property difference. The actual property value of the resource property. The expected property value of the resource property, as defined in the stack
- * template and any values specified as template parameters. A collection of the resource properties whose actual values differ from their
- * expected values. These will be present only for resources whose
- * The ID of the stack. The logical name of the resource specified in the template. Time at which AWS CloudFormation performed drift detection on the stack resource. The name or unique identifier that corresponds to a physical instance ID of a
+ * resource supported by AWS CloudFormation. Context information that enables AWS CloudFormation to uniquely identify a resource. AWS CloudFormation uses
+ * context key-value pairs in cases where a resource's logical and physical IDs are not enough
+ * to uniquely identify that resource. Each context key-value pair specifies a unique resource
+ * that contains the targeted resource. The type of the resource. A JSON structure containing the expected property values of the stack resource, as
+ * defined in the stack template and any values specified as template parameters. For resources whose A JSON structure containing the actual property values of the stack
@@ -3308,9 +3368,12 @@ export interface StackResourceDrift {
ActualProperties?: string;
/**
- * The ID of the stack. A collection of the resource properties whose actual values differ from their
+ * expected values. These will be present only for resources whose
+ * Status of the resource's actual configuration compared to its expected
@@ -3341,31 +3404,14 @@ export interface StackResourceDrift {
StackResourceDriftStatus: StackResourceDriftStatus | string | undefined;
/**
- * The name or unique identifier that corresponds to a physical instance ID of a
- * resource supported by AWS CloudFormation. A JSON structure containing the expected property values of the stack resource, as
- * defined in the stack template and any values specified as template parameters. For resources whose Context information that enables AWS CloudFormation to uniquely identify a resource. AWS CloudFormation uses
- * context key-value pairs in cases where a resource's logical and physical IDs are not enough
- * to uniquely identify that resource. Each context key-value pair specifies a unique resource
- * that contains the targeted resource. Time at which AWS CloudFormation performed drift detection on the stack resource. The type of the resource. Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template. The input for DescribeStackResources action. The name or unique identifier that corresponds to a physical instance ID of a
- * resource supported by AWS CloudFormation. For example, for an Amazon Elastic Compute Cloud (EC2) instance,
- * Required: Conditional. If you do not specify Default: There is no default value. The name or the unique stack ID that is associated with the stack, which are not
* always interchangeable: Default: There is no default value. The name or unique identifier that corresponds to a physical instance ID of a
+ * resource supported by AWS CloudFormation. For example, for an Amazon Elastic Compute Cloud (EC2) instance,
+ * Required: Conditional. If you do not specify Default: There is no default value. User defined description associated with the resource. The name associated with the stack. Success/failure message associated with the resource. Unique identifier of the stack. Time the status was updated. The logical name of the resource specified in the template. The name associated with the stack. The name or unique identifier that corresponds to a physical instance ID of a
+ * resource supported by AWS CloudFormation. Unique identifier of the stack. Type of resource. (For more information, go to AWS
+ * Resource Types Reference in the AWS CloudFormation User Guide.) Time the status was updated. Current status of the resource. The logical name of the resource specified in the template. Success/failure message associated with the resource. Type of resource. (For more information, go to AWS
- * Resource Types Reference in the AWS CloudFormation User Guide.) User defined description associated with the resource. Information about whether the resource's actual configuration differs, or has
@@ -3505,10 +3557,9 @@ export interface StackResource {
DriftInformation?: StackResourceDriftInformation;
/**
- * The name or unique identifier that corresponds to a physical instance ID of a
- * resource supported by AWS CloudFormation. Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template. The value associated with the output. User defined description associated with the output. The name of the export associated with the output. The value associated with the output. The Stack data type. The name associated with the stack. Unique identifier of the stack. A list of output structures. A list of Information on whether a stack's actual configuration differs, or has
- * drifted, from it's expected configuration, as defined in the stack
- * template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration
- * Changes to Stacks and Resources. The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role
- * that is associated with the stack. During a stack operation, AWS CloudFormation uses this
- * role's credentials to make calls on your behalf. For nested stacks--stacks created as resources for another stack--the stack ID of the
- * direct parent of this stack. For the first level of nested stacks, the root stack is also
- * the parent stack. For more information, see Working with Nested Stacks in the
- * AWS CloudFormation User Guide. SNS topic ARNs to which stack related events are published. The name associated with the stack. The unique ID of the change set. The capabilities allowed in the stack. A user-defined description associated with the stack. The amount of time within which stack creation should complete. A list of The time at which the stack was created. The time the stack was deleted. The time the stack was last updated. This field will only be returned if the stack
@@ -3743,6 +3765,17 @@ export interface Stack {
*/
LastUpdatedTime?: Date;
+ /**
+ * The rollback triggers for AWS CloudFormation to monitor during stack creation and
+ * updating operations, and for the specified monitoring period afterwards. Current status of the stack. Success/failure message associated with the stack status. A list of SNS topic ARNs to which stack related events are published. The time at which the stack was created. The amount of time within which stack creation should complete. Current status of the stack. The capabilities allowed in the stack. A user-defined description associated with the stack. A list of output structures. The time the stack was deleted. The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role
+ * that is associated with the stack. During a stack operation, AWS CloudFormation uses this
+ * role's credentials to make calls on your behalf. The rollback triggers for AWS CloudFormation to monitor during stack creation and
- * updating operations, and for the specified monitoring period afterwards. A list of Whether termination protection is enabled for the stack. For nested stacks--stacks created as resources for another stack--the stack ID of the
+ * direct parent of this stack. For the first level of nested stacks, the root stack is also
+ * the parent stack. For more information, see Working with Nested Stacks in the
+ * AWS CloudFormation User Guide. For nested stacks--stacks created as resources for another stack--the stack ID of the
* top-level stack to which the nested stack ultimately belongs. Information on whether a stack's actual configuration differs, or has
+ * drifted, from it's expected configuration, as defined in the stack
+ * template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration
+ * Changes to Stacks and Resources. If the output exceeds 1 MB in size, a string that identifies the next page of stacks.
- * If no additional page exists, this value is null. A list of stack structures. A list of stack structures. If the output exceeds 1 MB in size, a string that identifies the next page of stacks.
+ * If no additional page exists, this value is null.Metadata
attribute declared for the resource. For
* more information, see Metadata
@@ -3077,12 +3134,6 @@ export interface StackResourceDetail {
*/
Metadata?: string;
- /**
- * NextToken
@@ -3226,6 +3267,17 @@ export interface PropertyDifference {
*/
PropertyPath: string | undefined;
+ /**
+ *
@@ -3248,17 +3300,6 @@ export interface PropertyDifference {
*
*/
DifferenceType: DifferenceType | string | undefined;
-
- /**
- * StackResourceDriftStatus
is MODIFIED
.
- * StackResourceDriftStatus
is DELETED
,
+ * this structure will not be present. StackResourceDriftStatus
is MODIFIED
.
+ * StackResourceDriftStatus
is DELETED
,
- * this structure will not be present. PhysicalResourceId
corresponds to the InstanceId
. You can pass
- * the EC2 InstanceId
to DescribeStackResources
to find which stack
- * the instance belongs to and what other resources are part of the stack.PhysicalResourceId
, you
- * must specify StackName
.PhysicalResourceId
corresponds to the InstanceId
. You can pass
+ * the EC2 InstanceId
to DescribeStackResources
to find which stack
+ * the instance belongs to and what other resources are part of the stack.PhysicalResourceId
, you
+ * must specify StackName
.Parameter
structures.Parameter
structures.Tag
s that specify information about the stack.Tag
s that specify information about the stack.
The number of stack instances which match the expected template and parameter - * configuration of the stack set.
- */ - InSyncStackInstancesCount?: number; - - /** - *The number of stack instances for which the drift detection operation failed.
- */ - FailedStackInstancesCount?: number; - - /** - *Most recent time when CloudFormation performed a drift detection operation on the stack
- * set. This value will be NULL
for any stack set on which drift detection has
- * not yet been performed.
The number of stack instances that are currently being checked for drift.
- */ - InProgressStackInstancesCount?: number; - /** *Status of the stack set's actual configuration compared to its expected template and * parameter configuration. A stack set is considered to have drifted if one or more of its @@ -3927,34 +3955,6 @@ export interface StackSetDriftDetectionDetails { */ DriftStatus?: StackSetDriftStatus | string; - /** - *
The number of stack instances that have drifted from the expected template and parameter - * configuration of the stack set. A stack instance is considered to have drifted if one or - * more of the resources in the associated stack do not match their expected - * configuration.
- */ - DriftedStackInstancesCount?: number; - - /** - *The total number of stack instances belonging to this stack set.
- *The total number of stack instances is equal to the total of:
- *Stack instances that match the stack set configuration.
- *Stack instances that have drifted from the stack set configuration.
- *Stack instances where the drift detection operation has failed.
- *Stack instances currently being checked for drift.
- *The status of the stack set drift detection operation.
*Most recent time when CloudFormation performed a drift detection operation on the stack
+ * set. This value will be NULL
for any stack set on which drift detection has
+ * not yet been performed.
The total number of stack instances belonging to this stack set.
+ *The total number of stack instances is equal to the total of:
+ *Stack instances that match the stack set configuration.
+ *Stack instances that have drifted from the stack set configuration.
+ *Stack instances where the drift detection operation has failed.
+ *Stack instances currently being checked for drift.
+ *The number of stack instances that have drifted from the expected template and parameter + * configuration of the stack set. A stack instance is considered to have drifted if one or + * more of the resources in the associated stack do not match their expected + * configuration.
+ */ + DriftedStackInstancesCount?: number; + + /** + *The number of stack instances which match the expected template and parameter + * configuration of the stack set.
+ */ + InSyncStackInstancesCount?: number; + + /** + *The number of stack instances that are currently being checked for drift.
+ */ + InProgressStackInstancesCount?: number; + + /** + *The number of stack instances for which the drift detection operation failed.
+ */ + FailedStackInstancesCount?: number; } export namespace StackSetDriftDetectionDetails { @@ -4003,15 +4054,20 @@ export type StackSetStatus = "ACTIVE" | "DELETED"; */ export interface StackSet { /** - *A description of the stack set that you specify when the stack set is created or - * updated.
+ *The name that's associated with the stack set.
*/ - Description?: string; + StackSetName?: string; /** - *The name that's associated with the stack set.
+ *The ID of the stack set.
*/ - StackSetName?: string; + StackSetId?: string; + + /** + *A description of the stack set that you specify when the stack set is created or + * updated.
+ */ + Description?: string; /** *The status of the stack set.
@@ -4019,14 +4075,15 @@ export interface StackSet { Status?: StackSetStatus | string; /** - *A list of input parameters for a stack set.
+ *The structure that contains the body of the template that was used to create or + * update the stack set.
*/ - Parameters?: Parameter[]; + TemplateBody?: string; /** - *The Amazon Resource Number (ARN) of the stack set.
+ *A list of input parameters for a stack set.
*/ - StackSetARN?: string; + Parameters?: Parameter[]; /** *The capabilities that are allowed in the stack set. Some stack set templates might @@ -4036,6 +4093,17 @@ export interface StackSet { */ Capabilities?: (Capability | string)[]; + /** + *
A list of tags that specify information about the stack set. A maximum number of 50 + * tags can be specified.
+ */ + Tags?: Tag[]; + + /** + *The Amazon Resource Number (ARN) of the stack set.
+ */ + StackSetARN?: string; + /** *The Amazon Resource Number (ARN) of the IAM role used to create or update the stack * set.
@@ -4045,11 +4113,6 @@ export interface StackSet { */ AdministrationRoleARN?: string; - /** - *The ID of the stack set.
- */ - StackSetId?: string; - /** *The name of the IAM execution role used to create or update the stack set.
*Use customized execution roles to control which stack resources users and groups can @@ -4059,14 +4122,17 @@ export interface StackSet { ExecutionRoleName?: string; /** - *
[Service-managed
permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).
Detailed information about the drift status of the stack set.
+ *For stack sets, contains information about the last completed drift + * operation performed on the stack set. Information about drift operations currently in + * progress is not included.
*/ - AutoDeployment?: AutoDeployment; + StackSetDriftDetectionDetails?: StackSetDriftDetectionDetails; /** - *[Service-managed
permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
[Service-managed
permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).
Describes how the IAM roles required for stack set operations are created.
@@ -4076,30 +4142,15 @@ export interface StackSet { *With service-managed
permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations. For more information, see Grant Service-Managed Stack Set Permissions.
The structure that contains the body of the template that was used to create or - * update the stack set.
- */ - TemplateBody?: string; - - /** - *A list of tags that specify information about the stack set. A maximum number of 50 - * tags can be specified.
+ * + * */ - Tags?: Tag[]; + PermissionModel?: PermissionModels | string; /** - *Detailed information about the drift status of the stack set.
- *For stack sets, contains information about the last completed drift - * operation performed on the stack set. Information about drift operations currently in - * progress is not included.
+ *[Service-managed
permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
The ID of the stack set.
+ */ + StackSetId?: string; + + /** + *The type of stack set operation: CREATE
, UPDATE
, or
+ * DELETE
. Create and delete operations affect only the specified stack set
+ * instances that are associated with the specified stack set. Update operations affect both
+ * the stack set itself, as well as all associated stack set
+ * instances.
The status of the operation.
*The time at which the stack set operation ended, across all accounts and Regions - * specified. Note that this doesn't necessarily mean that the stack set operation was - * successful, or even attempted, in each account or Region.
- */ - EndTimestamp?: Date; - - /** - *The type of stack set operation: CREATE
, UPDATE
, or
- * DELETE
. Create and delete operations affect only the specified stack set
- * instances that are associated with the specified stack set. Update operations affect both
- * the stack set itself, as well as all associated stack set
- * instances.
The preferences for how AWS CloudFormation performs this stack set + * operation.
*/ - Action?: StackSetOperationAction | string; + OperationPreferences?: StackSetOperationPreferences; /** - *Detailed information about the drift status of the stack set. This includes information - * about drift operations currently being performed on the stack set.
- *this information will only be present for stack set operations whose Action
- * type is DETECT_DRIFT
.
For more information, see Detecting Unmanaged - * Changes in Stack Sets in the AWS CloudFormation User Guide.
+ *For stack set operations of action type DELETE
, specifies whether to
+ * remove the stack instances from the specified stack set, but doesn't delete the stacks. You
+ * can't reassociate a retained stack, or add an existing, saved stack to a new stack
+ * set.
The time at which the operation was initiated. Note that the creation times for the - * stack set operation might differ from the creation time of the individual stacks - * themselves. This is because AWS CloudFormation needs to perform preparatory work for the - * operation, such as dispatching the work to the requested Regions, before actually creating - * the first stacks.
+ *The Amazon Resource Number (ARN) of the IAM role used to perform this stack set + * operation.
+ *Use customized administrator roles to control which users or groups can manage specific + * stack sets within the same administrator account. For more information, see Define Permissions for Multiple + * Administrators in the AWS CloudFormation User Guide.
*/ - CreationTimestamp?: Date; + AdministrationRoleARN?: string; /** *The name of the IAM execution role used to create or update the stack set.
@@ -4236,18 +4289,20 @@ export interface StackSetOperation { ExecutionRoleName?: string; /** - *The Amazon Resource Number (ARN) of the IAM role used to perform this stack set - * operation.
- *Use customized administrator roles to control which users or groups can manage specific - * stack sets within the same administrator account. For more information, see Define Permissions for Multiple - * Administrators in the AWS CloudFormation User Guide.
+ *The time at which the operation was initiated. Note that the creation times for the + * stack set operation might differ from the creation time of the individual stacks + * themselves. This is because AWS CloudFormation needs to perform preparatory work for the + * operation, such as dispatching the work to the requested Regions, before actually creating + * the first stacks.
*/ - AdministrationRoleARN?: string; + CreationTimestamp?: Date; /** - *The ID of the stack set.
+ *The time at which the stack set operation ended, across all accounts and Regions + * specified. Note that this doesn't necessarily mean that the stack set operation was + * successful, or even attempted, in each account or Region.
*/ - StackSetId?: string; + EndTimestamp?: Date; /** *[Service-managed
permissions] The AWS Organizations accounts affected by the stack operation.
The preferences for how AWS CloudFormation performs this stack set - * operation.
- */ - OperationPreferences?: StackSetOperationPreferences; - - /** - *For stack set operations of action type DELETE
, specifies whether to
- * remove the stack instances from the specified stack set, but doesn't delete the stacks. You
- * can't reassociate a retained stack, or add an existing, saved stack to a new stack
- * set.
Detailed information about the drift status of the stack set. This includes information + * about drift operations currently being performed on the stack set.
+ *this information will only be present for stack set operations whose Action
+ * type is DETECT_DRIFT
.
For more information, see Detecting Unmanaged + * Changes in Stack Sets in the AWS CloudFormation User Guide.
*/ - RetainStacks?: boolean; + StackSetDriftDetectionDetails?: StackSetDriftDetectionDetails; } export namespace StackSetOperation { @@ -4305,10 +4356,11 @@ export namespace OperationNotFoundException { export interface DescribeTypeInput { /** - *The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
- *If you specify a VersionId
, DescribeType
returns information about that specific type version. Otherwise, it returns information about the default type version.
The kind of type.
+ *Currently the only valid value is RESOURCE
.
Conditional: You must specify either TypeName
and Type
, or Arn
.
The name of the type.
@@ -4323,11 +4375,10 @@ export interface DescribeTypeInput { Arn?: string; /** - *The kind of type.
- *Currently the only valid value is RESOURCE
.
Conditional: You must specify either TypeName
and Type
, or Arn
.
The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
+ *If you specify a VersionId
, DescribeType
returns information about that specific type version. Otherwise, it returns information about the default type version.
The Amazon CloudWatch log group to which CloudFormation sends error logging information when invoking the type's handlers.
+ *The ARN of the role that CloudFormation should assume when sending log entries to CloudWatch logs.
*/ - LogGroupName: string | undefined; + LogRoleArn: string | undefined; /** - *The ARN of the role that CloudFormation should assume when sending log entries to CloudWatch logs.
+ *The Amazon CloudWatch log group to which CloudFormation sends error logging information when invoking the type's handlers.
*/ - LogRoleArn: string | undefined; + LogGroupName: string | undefined; } export namespace LoggingConfig { @@ -4364,48 +4415,22 @@ export type ProvisioningType = "FULLY_MUTABLE" | "IMMUTABLE" | "NON_PROVISIONABL export type Visibility = "PRIVATE" | "PUBLIC"; export interface DescribeTypeOutput { - /** - *The schema that defines the type.
- *For more information on type schemas, see Resource Provider Schema in the CloudFormation CLI User Guide.
- */ - Schema?: string; - /** *The Amazon Resource Name (ARN) of the type.
*/ Arn?: string; /** - *The URL of a page providing detailed documentation for this type.
- */ - DocumentationUrl?: string; - - /** - *The description of the registered type.
+ *The kind of type.
+ *Currently the only valid value is RESOURCE
.
The name of the registered type.
*/ TypeName?: string; - /** - *The deprecation status of the type.
- *Valid values include:
- *
- * LIVE
: The type is registered and can be used in CloudFormation operations, dependent on its provisioning behavior and visibility scope.
- * DEPRECATED
: The type has been deregistered and can no longer be used in CloudFormation operations.
The ID of the default version of the type. The default version is used when the type version is not specified.
*To set the default version of a type, use Whether the specified type version is set as the default version. The description of the registered type. The schema that defines the type. For more information on type schemas, see Resource Provider Schema in the CloudFormation CLI User Guide. The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted. Valid values include: Whether the specified type version is set as the default version. The deprecation status of the type. Valid values include:
+ *
+ * Contains logging configuration information for a type. The Amazon Resource Name (ARN) of the IAM execution role used to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an
+ *
+ * IAM execution
+ * role
+ * that includes the necessary permissions to call those
+ * AWS APIs, and provision that execution role in your account. CloudFormation then
+ * assumes that execution role to provide your resource type with the appropriate
+ * credentials. The scope at which the type is visible and usable in CloudFormation operations. Valid values include: When the specified type version was registered. The kind of type. Currently the only valid value is The URL of the source code for the type. The Amazon Resource Name (ARN) of the IAM execution role used to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an
- *
- * IAM execution
- * role
- * that includes the necessary permissions to call those
- * AWS APIs, and provision that execution role in your account. CloudFormation then
- * assumes that execution role to provide your resource type with the appropriate
- * credentials. The URL of a page providing detailed documentation for this type. The URL of the source code for the type. When the specified type version was registered. When the specified type version was registered. The Amazon Resource Name (ARN) of this specific version of the type being registered. For registration requests with a The current status of the type registration request. The Amazon Resource Name (ARN) of the type being registered. For registration requests with a The description of the type registration request. The current status of the type registration request. The Amazon Resource Name (ARN) of the type being registered. For registration requests with a The description of the type registration request. The Amazon Resource Name (ARN) of this specific version of the type being registered. For registration requests with a The logical names of any resources you want to use as filters. The name of the stack for which you want to detect drift. The name of the stack for which you want to detect drift. The logical names of any resources you want to use as filters. The logical name of the resource for which to return drift information. The name of the stack to which the resource belongs. The logical name of the resource for which to return drift information.
- * The ID of the stack set operation.
- * The user-specified preferences for how AWS CloudFormation performs a stack set
* operation. For more information on maximum concurrent accounts and failure tolerance, see Stack set operation options.
+ * The ID of the stack set operation.
+ * A list of Structure containing the template body with a minimum length of 1 byte and a maximum
+ * length of 51,200 bytes. (For more information, go to Template Anatomy
+ * in the AWS CloudFormation User Guide.) Conditional: You must pass Location of file containing the template body. The URL must point to a template that
@@ -4688,13 +4743,9 @@ export interface EstimateTemplateCostInput {
TemplateURL?: string;
/**
- * Structure containing the template body with a minimum length of 1 byte and a maximum
- * length of 51,200 bytes. (For more information, go to Template Anatomy
- * in the AWS CloudFormation User Guide.) Conditional: You must pass A list of The input for the ExecuteChangeSet action. A unique identifier for this The name or ARN of the change set that you want use to update the specified
* stack.
@@ -4414,6 +4439,22 @@ export interface DescribeTypeOutput {
*/
DefaultVersionId?: string;
+ /**
+ *
+ *
*/
- IsDefaultVersion?: boolean;
+ DeprecatedStatus?: DeprecatedStatus | string;
/**
* LIVE
: The type is registered and can be used in CloudFormation operations, dependent on its provisioning behavior and visibility scope.DEPRECATED
: The type has been deregistered and can no longer be used in CloudFormation operations. RESOURCE
.ProgressStatus
of other than COMPLETE
, this will be null
.ProgressStatus
of other than COMPLETE
, this will be null
.ProgressStatus
of other than COMPLETE
, this will be null
.ProgressStatus
of other than COMPLETE
, this will be null
.Parameter
structures that specify input parameters.TemplateBody
or TemplateURL
. If
+ * both are passed, only TemplateBody
is used.TemplateBody
or TemplateURL
. If
- * both are passed, only TemplateBody
is used.Parameter
structures that specify input parameters.ExecuteChangeSet
request. Specify this
- * token if you plan to retry requests so that AWS CloudFormation knows that you're not
- * attempting to execute a change set to update a stack with the same name. You might retry
- * ExecuteChangeSet
requests to ensure that AWS CloudFormation successfully
- * received them.
A unique identifier for this ExecuteChangeSet
request. Specify this
+ * token if you plan to retry requests so that AWS CloudFormation knows that you're not
+ * attempting to execute a change set to update a stack with the same name. You might retry
+ * ExecuteChangeSet
requests to ensure that AWS CloudFormation successfully
+ * received them.
The name or Amazon Resource Name (ARN) of a change set for which AWS CloudFormation
+ * returns the associated template. If you specify a name, you must also specify the
+ * StackName
.
For templates that include transforms, the stage of the template that AWS
* CloudFormation returns. To get the user-submitted template, specify Original
.
@@ -4832,13 +4890,6 @@ export interface GetTemplateInput {
* specifies Original
.
The name or Amazon Resource Name (ARN) of a change set for which AWS CloudFormation
- * returns the associated template. If you specify a name, you must also specify the
- * StackName
.
The name or unique ID of the stack set from which the stack was created.
+ *Structure containing the template body with a minimum length of 1 byte and a maximum + * length of 51,200 bytes. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.
*Conditional: You must specify only one of the following parameters:
* StackName
, StackSetName
, TemplateBody
, or
* TemplateURL
.
The name or the stack ID that is associated with the stack, which are not always - * interchangeable. For running stacks, you can specify either the stack's name or its unique - * stack ID. For deleted stack, you must specify the unique stack ID.
+ *Location of file containing the template body. The URL must point to a template (max + * size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information about + * templates, see Template Anatomy + * in the AWS CloudFormation User Guide.
*Conditional: You must specify only one of the following parameters:
* StackName
, StackSetName
, TemplateBody
, or
* TemplateURL
.
Structure containing the template body with a minimum length of 1 byte and a maximum - * length of 51,200 bytes. For more information about templates, see Template Anatomy in the AWS CloudFormation User Guide.
+ *The name or the stack ID that is associated with the stack, which are not always + * interchangeable. For running stacks, you can specify either the stack's name or its unique + * stack ID. For deleted stack, you must specify the unique stack ID.
*Conditional: You must specify only one of the following parameters:
* StackName
, StackSetName
, TemplateBody
, or
* TemplateURL
.
Location of file containing the template body. The URL must point to a template (max - * size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information about - * templates, see Template Anatomy - * in the AWS CloudFormation User Guide.
+ *The name or unique ID of the stack set from which the stack was created.
*Conditional: You must specify only one of the following parameters:
* StackName
, StackSetName
, TemplateBody
, or
* TemplateURL
.
The ParameterDeclaration data type.
*/ export interface ParameterDeclaration { - /** - *The criteria that AWS CloudFormation uses to validate parameter values.
- */ - ParameterConstraints?: ParameterConstraints; - /** *The name that is associated with the parameter.
*/ @@ -4960,6 +5006,11 @@ export interface ParameterDeclaration { */ DefaultValue?: string; + /** + *The type of parameter.
+ */ + ParameterType?: string; + /** *Flag that indicates whether the parameter value is shown as plain text in logs and in * the AWS Management Console.
@@ -4972,9 +5023,9 @@ export interface ParameterDeclaration { Description?: string; /** - *The type of parameter.
+ *The criteria that AWS CloudFormation uses to validate parameter values.
*/ - ParameterType?: string; + ParameterConstraints?: ParameterConstraints; } export namespace ParameterDeclaration { @@ -4995,18 +5046,18 @@ export interface ResourceIdentifierSummary { */ ResourceType?: string; + /** + *The logical IDs of the target resources of the specified ResourceType
, as
+ * defined in the import template.
The resource properties you can provide during the import to identify your target
* resources. For example, BucketName
is a possible identifier property for
* AWS::S3::Bucket
resources.
The logical IDs of the target resources of the specified ResourceType
, as
- * defined in the import template.
The value that is defined for the Metadata
property of the
+ *
A list of parameter declarations that describe various properties for each + * parameter.
+ */ + Parameters?: ParameterDeclaration[]; + + /** + *The value that is defined in the Description
property of the
* template.
The capabilities found within the template. If your template contains IAM resources, @@ -5034,12 +5091,31 @@ export interface GetTemplateSummaryOutput { */ Capabilities?: (Capability | string)[]; + /** + *
The list of resources that generated the values in the Capabilities
+ * response element.
A list of all the template resource types that are defined in the template, such as
+ * AWS::EC2::Instance
, AWS::Dynamo::Table
, and
+ * Custom::MyCustomInstance
.
The AWS template format version, which identifies the capabilities of the * template.
*/ Version?: string; + /** + *The value that is defined for the Metadata
property of the
+ * template.
A list of the transforms that are declared in the template.
*/ @@ -5052,31 +5128,6 @@ export interface GetTemplateSummaryOutput { *AWS::S3::Bucket
resource.
*/
ResourceIdentifierSummaries?: ResourceIdentifierSummary[];
-
- /**
- * The list of resources that generated the values in the Capabilities
- * response element.
A list of parameter declarations that describe various properties for each - * parameter.
- */ - Parameters?: ParameterDeclaration[]; - - /** - *A list of all the template resource types that are defined in the template, such as
- * AWS::EC2::Instance
, AWS::Dynamo::Table
, and
- * Custom::MyCustomInstance
.
The value that is defined in the Description
property of the
- * template.
The stack that contains the exported output name and value.
+ */ + ExportingStackId?: string; + /** *The name of exported output value. Use this name and the Fn::ImportValue
* function to import the associated value into other stacks. The name is defined in the
@@ -5164,11 +5220,6 @@ export interface Export {
* section.
The stack that contains the exported output name and value.
- */ - ExportingStackId?: string; } export namespace Export { @@ -5179,15 +5230,15 @@ export namespace Export { export interface ListExportsOutput { /** - *If the output exceeds 100 exported output values, a string that identifies the next - * page of exports. If there is no additional page, this value is null.
+ *The output for the ListExports action.
*/ - NextToken?: string; + Exports?: Export[]; /** - *The output for the ListExports action.
+ *If the output exceeds 100 exported output values, a string that identifies the next + * page of exports. If there is no additional page, this value is null.
*/ - Exports?: Export[]; + NextToken?: string; } export namespace ListExportsOutput { @@ -5197,18 +5248,18 @@ export namespace ListExportsOutput { } export interface ListImportsInput { + /** + *The name of the exported output value. AWS CloudFormation returns the stack names + * that are importing this value.
+ */ + ExportName: string | undefined; + /** *A string (provided by the ListImports response output) that * identifies the next page of stacks that are importing the specified exported output value. *
*/ NextToken?: string; - - /** - *The name of the exported output value. AWS CloudFormation returns the stack names - * that are importing this value.
- */ - ExportName: string | undefined; } export namespace ListImportsInput { @@ -5218,17 +5269,17 @@ export namespace ListImportsInput { } export interface ListImportsOutput { - /** - *A string that identifies the next page of exports. If there is no additional page, - * this value is null.
- */ - NextToken?: string; - /** *A list of stack names that are importing the specified exported output value. *
*/ Imports?: string[]; + + /** + *A string that identifies the next page of exports. If there is no additional page, + * this value is null.
+ */ + NextToken?: string; } export namespace ListImportsOutput { @@ -5267,11 +5318,6 @@ export interface ListStackInstancesInput { */ StackSetName: string | undefined; - /** - *The name of the Region where you want to list stack instances.
- */ - StackInstanceRegion?: string; - /** *If the previous request didn't return all of the remaining results, the response's
* NextToken
parameter value is set to a token. To retrieve the next set of
@@ -5281,11 +5327,6 @@ export interface ListStackInstancesInput {
*/
NextToken?: string;
- /**
- *
The name of the AWS account that you want to list stack instances for.
- */ - StackInstanceAccount?: string; - /** *The maximum number of results to be returned with a single call. If the number of
* available results exceeds this maximum, the response includes a NextToken
@@ -5298,6 +5339,16 @@ export interface ListStackInstancesInput {
*
The status that stack instances are filtered by.
*/ Filters?: StackInstanceFilter[]; + + /** + *The name of the AWS account that you want to list stack instances for.
+ */ + StackInstanceAccount?: string; + + /** + *The name of the Region where you want to list stack instances.
+ */ + StackInstanceRegion?: string; } export namespace ListStackInstancesInput { @@ -5306,14 +5357,30 @@ export namespace ListStackInstancesInput { }); } -/** - *The structure that contains summary information about a stack instance.
- */ -export interface StackInstanceSummary { +/** + *The structure that contains summary information about a stack instance.
+ */ +export interface StackInstanceSummary { + /** + *The name or unique ID of the stack set that the stack instance is associated + * with.
+ */ + StackSetId?: string; + + /** + *The name of the AWS Region that the stack instance is associated with.
+ */ + Region?: string; + + /** + *[Self-managed
permissions] The name of the AWS account that the stack instance is associated with.
[Service-managed
permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
The ID of the stack instance.
*/ - OrganizationalUnitId?: string; + StackId?: string; /** *The status of the stack instance, in terms of its synchronization with its associated @@ -5354,20 +5421,20 @@ export interface StackInstanceSummary { Status?: StackInstanceStatus | string; /** - *
The name of the AWS Region that the stack instance is associated with.
+ *The explanation for the specific status code assigned to this stack + * instance.
*/ - Region?: string; + StatusReason?: string; /** - *The ID of the stack instance.
+ *The detailed status of the stack instance.
*/ - StackId?: string; + StackInstanceStatus?: StackInstanceComprehensiveStatus; /** - *The explanation for the specific status code assigned to this stack - * instance.
+ *[Service-managed
permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
Status of the stack instance's actual configuration compared to the expected template @@ -5398,28 +5465,12 @@ export interface StackInstanceSummary { */ DriftStatus?: StackDriftStatus | string; - /** - *
The detailed status of the stack instance.
- */ - StackInstanceStatus?: StackInstanceComprehensiveStatus; - /** *Most recent time when CloudFormation performed a drift detection operation on the stack
* instance. This value will be NULL
for any stack instance on which drift
* detection has not yet been performed.
[Self-managed
permissions] The name of the AWS account that the stack instance is associated with.
The name or unique ID of the stack set that the stack instance is associated - * with.
- */ - StackSetId?: string; } export namespace StackInstanceSummary { @@ -5429,6 +5480,12 @@ export namespace StackInstanceSummary { } export interface ListStackInstancesOutput { + /** + *A list of StackInstanceSummary
structures that contain information about
+ * the specified stack instances.
If the request doesn't return all of the remaining results, NextToken
is
* set to a token. To retrieve the next set of results, call ListStackInstances
@@ -5437,12 +5494,6 @@ export interface ListStackInstancesOutput {
* null
.
A list of StackInstanceSummary
structures that contain information about
- * the specified stack instances.
The input for the ListStackResource action.
*/ export interface ListStackResourcesInput { - /** - *A string that identifies the next page of stack resources that you want to - * retrieve.
- */ - NextToken?: string; - /** *The name or the unique stack ID that is associated with the stack, which are not * always interchangeable:
@@ -5476,6 +5521,12 @@ export interface ListStackResourcesInput { *Default: There is no default value.
*/ StackName: string | undefined; + + /** + *A string that identifies the next page of stack resources that you want to + * retrieve.
+ */ + NextToken?: string; } export namespace ListStackResourcesInput { @@ -5538,12 +5589,23 @@ export namespace StackResourceDriftInformationSummary { *Contains high-level information about the specified stack resource.
*/ export interface StackResourceSummary { + /** + *The logical name of the resource specified in the template.
+ */ + LogicalResourceId: string | undefined; + /** *The name or unique identifier that corresponds to a physical instance ID of the * resource.
*/ PhysicalResourceId?: string; + /** + *Type of resource. (For more information, go to AWS + * Resource Types Reference in the AWS CloudFormation User Guide.)
+ */ + ResourceType: string | undefined; + /** *Time the status was updated.
*/ @@ -5559,17 +5621,6 @@ export interface StackResourceSummary { */ ResourceStatusReason?: string; - /** - *Type of resource. (For more information, go to AWS - * Resource Types Reference in the AWS CloudFormation User Guide.)
- */ - ResourceType: string | undefined; - - /** - *The logical name of the resource specified in the template.
- */ - LogicalResourceId: string | undefined; - /** *Information about whether the resource's actual configuration differs, or has * drifted, from its expected configuration, as defined in the stack @@ -5577,6 +5628,11 @@ export interface StackResourceSummary { * Changes to Stacks and Resources.
*/ DriftInformation?: StackResourceDriftInformationSummary; + + /** + *Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
+ */ + ModuleInfo?: ModuleInfo; } export namespace StackResourceSummary { @@ -5683,17 +5739,30 @@ export namespace StackDriftInformationSummary { */ export interface StackSummary { /** - *Summarizes information on whether a stack's actual configuration differs, or has - * drifted, from it's expected configuration, as defined in the stack - * template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration - * Changes to Stacks and Resources.
+ *Unique stack identifier.
*/ - DriftInformation?: StackDriftInformationSummary; + StackId?: string; /** - *Success/Failure message associated with the stack status.
+ *The name associated with the stack.
*/ - StackStatusReason?: string; + StackName: string | undefined; + + /** + *The template description of the template used to create the stack.
+ */ + TemplateDescription?: string; + + /** + *The time the stack was created.
+ */ + CreationTime: Date | undefined; + + /** + *The time the stack was last updated. This field will only be returned if the stack + * has been updated at least once.
+ */ + LastUpdatedTime?: Date; /** *The time the stack was deleted.
@@ -5705,6 +5774,11 @@ export interface StackSummary { */ StackStatus: StackStatus | string | undefined; + /** + *Success/Failure message associated with the stack status.
+ */ + StackStatusReason?: string; + /** *For nested stacks--stacks created as resources for another stack--the stack ID of the * direct parent of this stack. For the first level of nested stacks, the root stack is also @@ -5723,30 +5797,12 @@ export interface StackSummary { RootId?: string; /** - *
The template description of the template used to create the stack.
- */ - TemplateDescription?: string; - - /** - *Unique stack identifier.
- */ - StackId?: string; - - /** - *The time the stack was last updated. This field will only be returned if the stack - * has been updated at least once.
- */ - LastUpdatedTime?: Date; - - /** - *The time the stack was created.
- */ - CreationTime: Date | undefined; - - /** - *The name associated with the stack.
+ *Summarizes information on whether a stack's actual configuration differs, or has + * drifted, from it's expected configuration, as defined in the stack + * template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration + * Changes to Stacks and Resources.
*/ - StackName: string | undefined; + DriftInformation?: StackDriftInformationSummary; } export namespace StackSummary { @@ -5779,6 +5835,17 @@ export namespace ListStacksOutput { } export interface ListStackSetOperationResultsInput { + /** + *The name or unique ID of the stack set that you want to get operation results + * for.
+ */ + StackSetName: string | undefined; + + /** + *The ID of the stack set operation.
+ */ + OperationId: string | undefined; + /** *If the previous request didn't return all of the remaining results, the response
* object's NextToken
parameter value is set to a token. To retrieve the next set
@@ -5789,11 +5856,6 @@ export interface ListStackSetOperationResultsInput {
*/
NextToken?: string;
- /**
- *
The ID of the stack set operation.
- */ - OperationId: string | undefined; - /** *The maximum number of results to be returned with a single call. If the number of
* available results exceeds this maximum, the response includes a NextToken
@@ -5801,12 +5863,6 @@ export interface ListStackSetOperationResultsInput {
* set of results.
The name or unique ID of the stack set that you want to get operation results - * for.
- */ - StackSetName: string | undefined; } export namespace ListStackSetOperationResultsInput { @@ -5822,6 +5878,16 @@ export type StackSetOperationResultStatus = "CANCELLED" | "FAILED" | "PENDING" | * given account in a given Region. */ export interface StackSetOperationResultSummary { + /** + *[Self-managed
permissions] The name of the AWS account for this operation result.
The name of the AWS Region for this operation result.
+ */ + Region?: string; + /** *The result status of the stack set operation for the given account in the given * Region.
@@ -5859,14 +5925,9 @@ export interface StackSetOperationResultSummary { Status?: StackSetOperationResultStatus | string; /** - *[Service-managed
permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
[Self-managed
permissions] The name of the AWS account for this operation result.
The reason for the assigned result status.
*/ - Account?: string; + StatusReason?: string; /** *The results of the account gate function AWS CloudFormation invokes, if present, @@ -5875,14 +5936,9 @@ export interface StackSetOperationResultSummary { AccountGateResult?: AccountGateResult; /** - *
The name of the AWS Region for this operation result.
- */ - Region?: string; - - /** - *The reason for the assigned result status.
+ *[Service-managed
permissions] The organization root ID or organizational unit (OU) IDs that you specified for DeploymentTargets.
A list of StackSetOperationResultSummary
structures that contain
+ * information about the specified operation results, for accounts and Regions that are
+ * included in the operation.
If the request doesn't return all results, NextToken
is set to a token.
* To retrieve the next set of results, call ListOperationResults
again and
@@ -5899,13 +5962,6 @@ export interface ListStackSetOperationResultsOutput {
* remaining results, NextToken
is set to null
.
A list of StackSetOperationResultSummary
structures that contain
- * information about the specified operation results, for accounts and Regions that are
- * included in the operation.
The name or unique ID of the stack set that you want to get operation summaries + * for.
+ */ + StackSetName: string | undefined; + /** *If the previous paginated request didn't return all of the remaining results, the
* response object's NextToken
parameter value is set to a token. To retrieve the
@@ -5932,12 +5994,6 @@ export interface ListStackSetOperationsInput {
* set of results.
The name or unique ID of the stack set that you want to get operation summaries - * for.
- */ - StackSetName: string | undefined; } export namespace ListStackSetOperationsInput { @@ -5956,6 +6012,15 @@ export interface StackSetOperationSummary { */ OperationId?: string; + /** + *The type of operation: CREATE
, UPDATE
, or
+ * DELETE
. Create and delete operations affect only the specified stack
+ * instances that are associated with the specified stack set. Update operations affect both
+ * the stack set itself as well as all associated stack set
+ * instances.
The overall status of the operation.
*The time at which the stack set operation ended, across all accounts and Regions - * specified. Note that this doesn't necessarily mean that the stack set operation was - * successful, or even attempted, in each account or Region.
- */ - EndTimestamp?: Date; - /** *The time at which the operation was initiated. Note that the creation times for the * stack set operation might differ from the creation time of the individual stacks @@ -6013,13 +6071,11 @@ export interface StackSetOperationSummary { CreationTimestamp?: Date; /** - *
The type of operation: CREATE
, UPDATE
, or
- * DELETE
. Create and delete operations affect only the specified stack
- * instances that are associated with the specified stack set. Update operations affect both
- * the stack set itself as well as all associated stack set
- * instances.
The time at which the stack set operation ended, across all accounts and Regions + * specified. Note that this doesn't necessarily mean that the stack set operation was + * successful, or even attempted, in each account or Region.
*/ - Action?: StackSetOperationAction | string; + EndTimestamp?: Date; } export namespace StackSetOperationSummary { @@ -6028,7 +6084,13 @@ export namespace StackSetOperationSummary { }); } -export interface ListStackSetOperationsOutput { +export interface ListStackSetOperationsOutput { + /** + *A list of StackSetOperationSummary
structures that contain summary
+ * information about operations for the specified stack set.
If the request doesn't return all results, NextToken
is set to a token.
* To retrieve the next set of results, call ListOperationResults
again and
@@ -6036,12 +6098,6 @@ export interface ListStackSetOperationsOutput {
* remaining results, NextToken
is set to null
.
A list of StackSetOperationSummary
structures that contain summary
- * information about operations for the specified stack set.
The status of the stack sets that you want to get summary information - * about.
+ *If the previous paginated request didn't return all of the remaining results, the
+ * response object's NextToken
parameter value is set to a token. To retrieve the
+ * next set of results, call ListStackSets
again and assign that token to the
+ * request object's NextToken
parameter. If there are no remaining results, the
+ * previous response object's NextToken
parameter is set to
+ * null
.
The maximum number of results to be returned with a single call. If the number of @@ -6066,14 +6126,10 @@ export interface ListStackSetsInput { MaxResults?: number; /** - *
If the previous paginated request didn't return all of the remaining results, the
- * response object's NextToken
parameter value is set to a token. To retrieve the
- * next set of results, call ListStackSets
again and assign that token to the
- * request object's NextToken
parameter. If there are no remaining results, the
- * previous response object's NextToken
parameter is set to
- * null
.
The status of the stack sets that you want to get summary information + * about.
*/ - NextToken?: string; + Status?: StackSetStatus | string; } export namespace ListStackSetsInput { @@ -6087,23 +6143,44 @@ export namespace ListStackSetsInput { * set. */ export interface StackSetSummary { + /** + *The name of the stack set.
+ */ + StackSetName?: string; + + /** + *The ID of the stack set.
+ */ + StackSetId?: string; + /** *A description of the stack set that you specify when the stack set is created or * updated.
*/ Description?: string; + /** + *The status of the stack set.
+ */ + Status?: StackSetStatus | string; + /** *[Service-managed
permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organizational unit (OU).
Most recent time when CloudFormation performed a drift detection operation on the stack
- * set. This value will be NULL
for any stack set on which drift detection has
- * not yet been performed.
Describes how the IAM roles required for stack set operations are created.
+ *With self-managed
permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see Grant Self-Managed Stack Set Permissions.
With service-managed
permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations. For more information, see Grant Service-Managed Stack Set Permissions.
Status of the stack set's actual configuration compared to its expected template and @@ -6136,32 +6213,11 @@ export interface StackSetSummary { DriftStatus?: StackDriftStatus | string; /** - *
The name of the stack set.
- */ - StackSetName?: string; - - /** - *The ID of the stack set.
- */ - StackSetId?: string; - - /** - *The status of the stack set.
- */ - Status?: StackSetStatus | string; - - /** - *Describes how the IAM roles required for stack set operations are created.
- *With self-managed
permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see Grant Self-Managed Stack Set Permissions.
With service-managed
permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations. For more information, see Grant Service-Managed Stack Set Permissions.
Most recent time when CloudFormation performed a drift detection operation on the stack
+ * set. This value will be NULL
for any stack set on which drift detection has
+ * not yet been performed.
If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
The name of the type.
+ *Conditional: You must specify either TypeName
and Type
, or Arn
.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The Amazon Resource Name (ARN) of the type.
+ *Conditional: You must specify either TypeName
and Type
, or Arn
.
The current status of the type registration request.
@@ -6218,16 +6276,14 @@ export interface ListTypeRegistrationsInput { RegistrationStatusFilter?: RegistrationStatus | string; /** - *The Amazon Resource Name (ARN) of the type.
- *Conditional: You must specify either TypeName
and Type
, or Arn
.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The name of the type.
- *Conditional: You must specify either TypeName
and Type
, or Arn
.
If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
If the request doesn't return all of the remaining results, NextToken
is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If the request returns all results, NextToken
is set to null
.
A list of type registration tokens.
*Use
@@ -6249,6 +6300,11 @@ export interface ListTypeRegistrationsOutput {
*
to return detailed information about a type registration request.
If the request doesn't return all of the remaining results, NextToken
is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If the request returns all results, NextToken
is set to null
.
The deprecation status of the types that you want to get summary information about.
+ *The scope at which the type is visible and usable in CloudFormation operations.
*Valid values include:
*
- * LIVE
: The type is registered for use in CloudFormation operations.
PRIVATE
: The type is only visible and usable within the account in which it is registered. Currently, AWS CloudFormation marks any types you create as PRIVATE
.
*
- * DEPRECATED
: The type has been deregistered and can no longer be used in CloudFormation operations.
PUBLIC
: The type is publically visible and usable within any Amazon account.
* The default is PRIVATE
.
The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.
@@ -6295,31 +6352,35 @@ export interface ListTypesInput { ProvisioningType?: ProvisioningType | string; /** - *If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
The scope at which the type is visible and usable in CloudFormation operations.
+ *The deprecation status of the types that you want to get summary information about.
*Valid values include:
*
- * PRIVATE
: The type is only visible and usable within the account in which it is registered. Currently, AWS CloudFormation marks any types you create as PRIVATE
.
LIVE
: The type is registered for use in CloudFormation operations.
*
- * PUBLIC
: The type is publically visible and usable within any Amazon account.
DEPRECATED
: The type has been deregistered and can no longer be used in CloudFormation operations.
* The default is PRIVATE
.
The type of extension.
+ */ + Type?: RegistryType | string; + + /** + *The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
The description of the type.
+ *The kind of type.
*/ - Description?: string; + Type?: RegistryType | string; /** *The name of the type.
@@ -6343,9 +6404,12 @@ export interface TypeSummary { TypeName?: string; /** - *The kind of type.
+ *The ID of the default version of the type. The default version is used when the type version is not specified.
+ *To set the default version of a type, use
+ * SetTypeDefaultVersion
+ *
.
The Amazon Resource Name (ARN) of the type.
@@ -6358,12 +6422,9 @@ export interface TypeSummary { LastUpdated?: Date; /** - *The ID of the default version of the type. The default version is used when the type version is not specified.
- *To set the default version of a type, use
- * SetTypeDefaultVersion
- *
.
The description of the type.
*/ - DefaultVersionId?: string; + Description?: string; } export namespace TypeSummary { @@ -6391,11 +6452,6 @@ export namespace ListTypesOutput { } export interface ListTypeVersionsInput { - /** - *If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
The kind of the type.
*Currently the only valid value is RESOURCE
.
The name of the type for which you want version summary information.
+ *Conditional: You must specify either TypeName
and Type
, or Arn
.
The Amazon Resource Name (ARN) of the type for which you want version summary information.
+ *Conditional: You must specify either TypeName
and Type
, or Arn
.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken
value that you can assign to the NextToken
request parameter to get the next set of results.
If the previous paginated request didn't return all of the remaining results, the response object's NextToken
parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken
parameter. If there are no remaining results, the previous response object's NextToken
parameter is set to null
.
The deprecation status of the type versions that you want to get summary information about.
*Valid values include:
@@ -6424,18 +6497,6 @@ export interface ListTypeVersionsInput { *The default is LIVE
.
The name of the type for which you want version summary information.
- *Conditional: You must specify either TypeName
and Type
, or Arn
.
The Amazon Resource Name (ARN) of the type for which you want version summary information.
- *Conditional: You must specify either TypeName
and Type
, or Arn
.
Whether the specified type version is set as the default version.
+ *The kind of type.
*/ - IsDefaultVersion?: boolean; + Type?: RegistryType | string; /** - *The description of the type version.
+ *The name of the type.
*/ - Description?: string; + TypeName?: string; /** - *The Amazon Resource Name (ARN) of the type version.
+ *The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
*/ - Arn?: string; + VersionId?: string; /** - *The name of the type.
+ *Whether the specified type version is set as the default version.
*/ - TypeName?: string; + IsDefaultVersion?: boolean; /** - *The kind of type.
+ *The Amazon Resource Name (ARN) of the type version.
*/ - Type?: RegistryType | string; + Arn?: string; /** *When the version was registered.
@@ -6479,9 +6540,9 @@ export interface TypeVersionSummary { TimeCreated?: Date; /** - *The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
+ *The description of the type version.
*/ - VersionId?: string; + Description?: string; } export namespace TypeVersionSummary { @@ -6566,17 +6627,17 @@ export interface RecordHandlerProgressInput { /** *Reserved for use by the CloudFormation CLI.
*/ - ClientRequestToken?: string; + BearerToken: string | undefined; /** *Reserved for use by the CloudFormation CLI.
*/ - CurrentOperationStatus?: OperationStatus | string; + OperationStatus: OperationStatus | string | undefined; /** *Reserved for use by the CloudFormation CLI.
*/ - BearerToken: string | undefined; + CurrentOperationStatus?: OperationStatus | string; /** *Reserved for use by the CloudFormation CLI.
@@ -6586,17 +6647,17 @@ export interface RecordHandlerProgressInput { /** *Reserved for use by the CloudFormation CLI.
*/ - ResourceModel?: string; + ErrorCode?: HandlerErrorCode | string; /** *Reserved for use by the CloudFormation CLI.
*/ - ErrorCode?: HandlerErrorCode | string; + ResourceModel?: string; /** *Reserved for use by the CloudFormation CLI.
*/ - OperationStatus: OperationStatus | string | undefined; + ClientRequestToken?: string; } export namespace RecordHandlerProgressInput { @@ -6614,6 +6675,12 @@ export namespace RecordHandlerProgressOutput { } export interface RegisterTypeInput { + /** + *The kind of type.
+ *Currently, the only valid value is RESOURCE
.
The name of the type being registered.
*We recommend that type names adhere to the following pattern: company_or_organization::service::type.
@@ -6670,10 +6737,9 @@ export interface RegisterTypeInput { SchemaHandlerPackage: string | undefined; /** - *The kind of type.
- *Currently, the only valid value is RESOURCE
.
Specifies logging configuration information for a type.
*/ - Type?: RegistryType | string; + LoggingConfig?: LoggingConfig; /** *The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the resource provider. If your resource type calls AWS APIs in any of its handlers, you must create an @@ -6689,11 +6755,6 @@ export interface RegisterTypeInput { *
A unique identifier that acts as an idempotency key for this registration request. Specifying a client request token prevents CloudFormation from generating more than one version of a type from the same registeration request, even if the request is submitted multiple times.
*/ ClientRequestToken?: string; - - /** - *Specifies logging configuration information for a type.
- */ - LoggingConfig?: LoggingConfig; } export namespace RegisterTypeInput { @@ -6763,16 +6824,16 @@ export interface SetTypeDefaultVersionInput { */ Type?: RegistryType | string; - /** - *The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
- */ - VersionId?: string; - /** *The name of the type.
*Conditional: You must specify either TypeName
and Type
, or Arn
.
The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.
+ */ + VersionId?: string; } export namespace SetTypeDefaultVersionInput { @@ -6804,6 +6865,12 @@ export interface SignalResourceInput { */ StackName: string | undefined; + /** + *The logical ID of the resource that you want to signal. The logical ID is the name of + * the resource that given in the template.
+ */ + LogicalResourceId: string | undefined; + /** *A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling * groups, specify the instance ID that you are signaling as the unique ID. If you send @@ -6817,12 +6884,6 @@ export interface SignalResourceInput { * AWS CloudFormation to immediately fail the stack creation or update.
*/ Status: ResourceSignalStatus | string | undefined; - - /** - *The logical ID of the resource that you want to signal. The logical ID is the name of - * the resource that given in the template.
- */ - LogicalResourceId: string | undefined; } export namespace SignalResourceInput { @@ -6863,21 +6924,9 @@ export namespace StopStackSetOperationOutput { */ export interface UpdateStackInput { /** - *Structure containing the temporary overriding stack policy body. You can specify
- * either the StackPolicyDuringUpdateBody
or the
- * StackPolicyDuringUpdateURL
parameter, but not both.
If you want to update protected resources, specify a temporary overriding stack - * policy during this update. If you do not specify a stack policy, the current policy that is - * associated with the stack will be used.
- */ - StackPolicyDuringUpdateBody?: string; - - /** - *A list of Parameter
structures that specify input parameters for the
- * stack. For more information, see the Parameter data
- * type.
The name or unique stack ID of the stack to update.
*/ - Parameters?: Parameter[]; + StackName: string | undefined; /** *Structure containing the template body with a minimum length of 1 byte and a maximum @@ -6889,60 +6938,6 @@ export interface UpdateStackInput { */ TemplateBody?: string; - /** - *
Structure containing a new stack policy body. You can specify either the
- * StackPolicyBody
or the StackPolicyURL
parameter, but not
- * both.
You might update the stack policy, for example, in order to protect a new resource - * that you created during a stack update. If you do not specify a stack policy, the current - * policy that is associated with the stack is unchanged.
- */ - StackPolicyBody?: string; - - /** - *Location of a file containing the temporary overriding stack policy. The URL must
- * point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack.
- * You can specify either the StackPolicyDuringUpdateBody
or the
- * StackPolicyDuringUpdateURL
parameter, but not both.
If you want to update protected resources, specify a temporary overriding stack - * policy during this update. If you do not specify a stack policy, the current policy that is - * associated with the stack will be used.
- */ - StackPolicyDuringUpdateURL?: string; - - /** - *The name or unique stack ID of the stack to update.
- */ - StackName: string | undefined; - - /** - *Location of a file containing the updated stack policy. The URL must point to a
- * policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. You can
- * specify either the StackPolicyBody
or the StackPolicyURL
- * parameter, but not both.
You might update the stack policy, for example, in order to protect a new resource - * that you created during a stack update. If you do not specify a stack policy, the current - * policy that is associated with the stack is unchanged.
- */ - StackPolicyURL?: string; - - /** - *Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that AWS - * CloudFormation associates with the stack. Specify an empty list to remove all notification - * topics.
- */ - NotificationARNs?: string[]; - - /** - *Key-value pairs to associate with this stack. AWS CloudFormation also propagates - * these tags to supported resources in the stack. You can specify a maximum number of 50 - * tags.
- *If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's - * tags. If you specify an empty value, AWS CloudFormation removes all associated - * tags.
- */ - Tags?: Tag[]; - /** *Location of file containing the template body. The URL must point to a template that * is located in an Amazon S3 bucket. For more information, go to Template Anatomy @@ -6953,38 +6948,6 @@ export interface UpdateStackInput { */ TemplateURL?: string; - /** - *
A unique identifier for this UpdateStack
request. Specify this token if
- * you plan to retry requests so that AWS CloudFormation knows that you're not attempting to
- * update a stack with the same name. You might retry UpdateStack
requests to
- * ensure that AWS CloudFormation successfully received them.
All events triggered by a given stack operation are assigned the same client request
- * token, which you can use to track operations. For example, if you execute a
- * CreateStack
operation with the token token1
, then all the
- * StackEvents
generated by that operation will have
- * ClientRequestToken
set as token1
.
In the console, stack operations display the client request token on the Events tab.
- * Stack operations that are initiated from the console use the token format
- * Console-StackOperation-ID, which helps you easily identify the
- * stack operation . For example, if you create a stack using the console, each stack event
- * would be assigned the same token in the following format:
- * Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002
.
The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role - * that AWS CloudFormation assumes to update the stack. AWS CloudFormation uses the role's - * credentials to make calls on your behalf. AWS CloudFormation always uses this role for all - * future operations on the stack. As long as users have permission to operate on the stack, - * AWS CloudFormation uses this role even if the users don't have permission to pass it. - * Ensure that the role grants least privilege.
- *If you don't specify a value, AWS CloudFormation uses the role that was previously - * associated with the stack. If no role is available, AWS CloudFormation uses a temporary - * session that is generated from your user credentials.
- */ - RoleARN?: string; - /** *Reuse the existing template that is associated with the stack that you are * updating.
@@ -6995,21 +6958,32 @@ export interface UpdateStackInput { UsePreviousTemplate?: boolean; /** - *The rollback triggers for AWS CloudFormation to monitor during stack creation and - * updating operations, and for the specified monitoring period afterwards.
+ *Structure containing the temporary overriding stack policy body. You can specify
+ * either the StackPolicyDuringUpdateBody
or the
+ * StackPolicyDuringUpdateURL
parameter, but not both.
If you want to update protected resources, specify a temporary overriding stack + * policy during this update. If you do not specify a stack policy, the current policy that is + * associated with the stack will be used.
*/ - RollbackConfiguration?: RollbackConfiguration; + StackPolicyDuringUpdateBody?: string; /** - *The template resource types that you have permissions to work with for this update
- * stack action, such as AWS::EC2::Instance
, AWS::EC2::*
, or
- * Custom::MyCustomInstance
.
If the list of resource types doesn't include a resource that you're updating, the - * stack update fails. By default, AWS CloudFormation grants permissions to all resource - * types. AWS Identity and Access Management (IAM) uses this parameter for AWS - * CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.
+ *Location of a file containing the temporary overriding stack policy. The URL must
+ * point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack.
+ * You can specify either the StackPolicyDuringUpdateBody
or the
+ * StackPolicyDuringUpdateURL
parameter, but not both.
If you want to update protected resources, specify a temporary overriding stack + * policy during this update. If you do not specify a stack policy, the current policy that is + * associated with the stack will be used.
*/ - ResourceTypes?: string[]; + StackPolicyDuringUpdateURL?: string; + + /** + *A list of Parameter
structures that specify input parameters for the
+ * stack. For more information, see the Parameter data
+ * type.
In some cases, you must explicitly acknowledge that your stack template contains certain @@ -7116,6 +7090,93 @@ export interface UpdateStackInput { *
The template resource types that you have permissions to work with for this update
+ * stack action, such as AWS::EC2::Instance
, AWS::EC2::*
, or
+ * Custom::MyCustomInstance
.
If the list of resource types doesn't include a resource that you're updating, the + * stack update fails. By default, AWS CloudFormation grants permissions to all resource + * types. AWS Identity and Access Management (IAM) uses this parameter for AWS + * CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management.
+ */ + ResourceTypes?: string[]; + + /** + *The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role + * that AWS CloudFormation assumes to update the stack. AWS CloudFormation uses the role's + * credentials to make calls on your behalf. AWS CloudFormation always uses this role for all + * future operations on the stack. As long as users have permission to operate on the stack, + * AWS CloudFormation uses this role even if the users don't have permission to pass it. + * Ensure that the role grants least privilege.
+ *If you don't specify a value, AWS CloudFormation uses the role that was previously + * associated with the stack. If no role is available, AWS CloudFormation uses a temporary + * session that is generated from your user credentials.
+ */ + RoleARN?: string; + + /** + *The rollback triggers for AWS CloudFormation to monitor during stack creation and + * updating operations, and for the specified monitoring period afterwards.
+ */ + RollbackConfiguration?: RollbackConfiguration; + + /** + *Structure containing a new stack policy body. You can specify either the
+ * StackPolicyBody
or the StackPolicyURL
parameter, but not
+ * both.
You might update the stack policy, for example, in order to protect a new resource + * that you created during a stack update. If you do not specify a stack policy, the current + * policy that is associated with the stack is unchanged.
+ */ + StackPolicyBody?: string; + + /** + *Location of a file containing the updated stack policy. The URL must point to a
+ * policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. You can
+ * specify either the StackPolicyBody
or the StackPolicyURL
+ * parameter, but not both.
You might update the stack policy, for example, in order to protect a new resource + * that you created during a stack update. If you do not specify a stack policy, the current + * policy that is associated with the stack is unchanged.
+ */ + StackPolicyURL?: string; + + /** + *Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that AWS + * CloudFormation associates with the stack. Specify an empty list to remove all notification + * topics.
+ */ + NotificationARNs?: string[]; + + /** + *Key-value pairs to associate with this stack. AWS CloudFormation also propagates + * these tags to supported resources in the stack. You can specify a maximum number of 50 + * tags.
+ *If you don't specify this parameter, AWS CloudFormation doesn't modify the stack's + * tags. If you specify an empty value, AWS CloudFormation removes all associated + * tags.
+ */ + Tags?: Tag[]; + + /** + *A unique identifier for this UpdateStack
request. Specify this token if
+ * you plan to retry requests so that AWS CloudFormation knows that you're not attempting to
+ * update a stack with the same name. You might retry UpdateStack
requests to
+ * ensure that AWS CloudFormation successfully received them.
All events triggered by a given stack operation are assigned the same client request
+ * token, which you can use to track operations. For example, if you execute a
+ * CreateStack
operation with the token token1
, then all the
+ * StackEvents
generated by that operation will have
+ * ClientRequestToken
set as token1
.
In the console, stack operations display the client request token on the Events tab.
+ * Stack operations that are initiated from the console use the token format
+ * Console-StackOperation-ID, which helps you easily identify the
+ * stack operation . For example, if you create a stack using the console, each stack event
+ * would be assigned the same token in the following format:
+ * Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002
.
The name or unique ID of the stack set associated with the stack instances.
+ */ + StackSetName: string | undefined; + /** *[Self-managed
permissions] The names of one or more AWS accounts for which you want to update parameter values
* for stack instances. The overridden parameter values will be applied to all stack instances
@@ -7149,22 +7215,6 @@ export interface UpdateStackInstancesInput {
*/
Accounts?: string[];
- /**
- *
The unique identifier for this stack set operation.
- *The operation ID also functions as an idempotency token, to ensure that AWS - * CloudFormation performs the stack set operation only once, even if you retry the request - * multiple times. You might retry stack set operation requests to ensure that AWS - * CloudFormation successfully received them.
- *If you don't specify an operation ID, the SDK generates one automatically. - *
- */ - OperationId?: string; - - /** - *The name or unique ID of the stack set associated with the stack instances.
- */ - StackSetName: string | undefined; - /** *[Service-managed
permissions] The AWS Organizations accounts for which you want to update parameter values for stack instances. If your update targets OUs, the overridden parameter values only apply to the accounts that are currently in the target OUs and their child OUs. Accounts added to the target OUs and their child OUs in the future won't use the overridden values.
You can specify Accounts
or DeploymentTargets
, but not both.
Preferences for how AWS CloudFormation performs this stack set operation.
*/ OperationPreferences?: StackSetOperationPreferences; + + /** + *The unique identifier for this stack set operation.
+ *The operation ID also functions as an idempotency token, to ensure that AWS + * CloudFormation performs the stack set operation only once, even if you retry the request + * multiple times. You might retry stack set operation requests to ensure that AWS + * CloudFormation successfully received them.
+ *If you don't specify an operation ID, the SDK generates one automatically. + *
+ */ + OperationId?: string; } export namespace UpdateStackInstancesInput { @@ -7252,18 +7313,14 @@ export namespace UpdateStackInstancesOutput { export interface UpdateStackSetInput { /** - *A list of input parameters for the stack set template.
+ *The name or unique ID of the stack set that you want to update.
*/ - Parameters?: Parameter[]; + StackSetName: string | undefined; /** - *Use the existing template that's associated with the stack set that you're - * updating.
- *Conditional: You must specify only one of the following parameters:
- * TemplateBody
or TemplateURL
—or set
- * UsePreviousTemplate
to true.
A brief description of updates that you are making.
*/ - UsePreviousTemplate?: boolean; + Description?: string; /** *The structure that contains the template body, with a minimum length of 1 byte and a @@ -7276,26 +7333,29 @@ export interface UpdateStackSetInput { TemplateBody?: string; /** - *
[Service-managed
permissions] The AWS Organizations accounts in which to update associated stack instances.
To update all the stack instances associated with this stack set, do not specify DeploymentTargets
or Regions
.
If the stack set update includes changes to the template (that is, if TemplateBody
or TemplateURL
is specified), or the Parameters
, AWS CloudFormation marks all stack instances with a status of OUTDATED
prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.
The location of the file that contains the template body. The URL must point to a + * template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket. For more + * information, see Template Anatomy + * in the AWS CloudFormation User Guide.
+ *Conditional: You must specify only one of the following parameters:
+ * TemplateBody
or TemplateURL
—or set
+ * UsePreviousTemplate
to true.
The Regions in which to update associated stack instances. If you specify Regions, you - * must also specify accounts in which to update stack set instances.
- *To update all the stack instances associated with this stack set,
- * do not specify the Accounts
or Regions
properties.
If the stack set update includes changes to the template (that is, if the
- * TemplateBody
or TemplateURL
properties are specified), or the
- * Parameters
property, AWS CloudFormation marks all stack instances with a status of
- * OUTDATED
prior to updating the stack instances in the specified accounts
- * and Regions. If the stack set update does not include changes to the template or
- * parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while
- * leaving all other stack instances with their existing stack instance status.
Use the existing template that's associated with the stack set that you're + * updating.
+ *Conditional: You must specify only one of the following parameters:
+ * TemplateBody
or TemplateURL
—or set
+ * UsePreviousTemplate
to true.
A list of input parameters for the stack set template.
+ */ + Parameters?: Parameter[]; /** *In some cases, you must explicitly acknowledge that your stack template contains certain @@ -7395,75 +7455,6 @@ export interface UpdateStackSetInput { */ Capabilities?: (Capability | string)[]; - /** - *
A brief description of updates that you are making.
- */ - Description?: string; - - /** - *[Service-managed
permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).
If you specify AutoDeployment
, do not specify DeploymentTargets
or Regions
.
The Amazon Resource Number (ARN) of the IAM role to use to update this stack set.
- *Specify an IAM role only if you are using customized administrator roles to control - * which users or groups can manage specific stack sets within the same administrator account. - * For more information, see Granting - * Permissions for Stack Set Operations in the - * AWS CloudFormation User Guide.
- *If you specified a customized administrator role when you created the stack set, you - * must specify a customized administrator role, even if it is the same customized - * administrator role used with this stack set previously.
- */ - AdministrationRoleARN?: string; - - /** - *The location of the file that contains the template body. The URL must point to a - * template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket. For more - * information, see Template Anatomy - * in the AWS CloudFormation User Guide.
- *Conditional: You must specify only one of the following parameters:
- * TemplateBody
or TemplateURL
—or set
- * UsePreviousTemplate
to true.
The name of the IAM execution role to use to update the stack set. If you do not specify
- * an execution role, AWS CloudFormation uses the AWSCloudFormationStackSetExecutionRole
role
- * for the stack set operation.
Specify an IAM role only if you are using customized execution roles to control which - * stack resources users and groups can include in their stack sets. - *
- *If you specify a customized execution role, AWS CloudFormation uses that role to update the stack. - * If you do not specify a customized execution role, AWS CloudFormation performs the update using the role - * previously associated with the stack set, so long as you have permissions to perform - * operations on the stack set.
- */ - ExecutionRoleName?: string; - - /** - *[Self-managed
permissions] The accounts in which to update associated stack instances. If you specify accounts, you
- * must also specify the Regions in which to update stack set instances.
To update all the stack instances associated with this stack set,
- * do not specify the Accounts
or Regions
properties.
If the stack set update includes changes to the template (that is, if the
- * TemplateBody
or TemplateURL
properties are specified), or the
- * Parameters
property, AWS CloudFormation marks all stack instances with a status of
- * OUTDATED
prior to updating the stack instances in the specified accounts
- * and Regions. If the stack set update does not include changes to the template or
- * parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while
- * leaving all other stack instances with their existing stack instance status.
Preferences for how AWS CloudFormation performs this stack set operation.
- */ - OperationPreferences?: StackSetOperationPreferences; - /** *The key-value pairs to associate with this stack set and the stacks created from it. * AWS CloudFormation also propagates these tags to supported resources that are created in @@ -7499,9 +7490,43 @@ export interface UpdateStackSetInput { Tags?: Tag[]; /** - *
The name or unique ID of the stack set that you want to update.
+ *Preferences for how AWS CloudFormation performs this stack set operation.
*/ - StackSetName: string | undefined; + OperationPreferences?: StackSetOperationPreferences; + + /** + *The Amazon Resource Number (ARN) of the IAM role to use to update this stack set.
+ *Specify an IAM role only if you are using customized administrator roles to control + * which users or groups can manage specific stack sets within the same administrator account. + * For more information, see Granting + * Permissions for Stack Set Operations in the + * AWS CloudFormation User Guide.
+ *If you specified a customized administrator role when you created the stack set, you + * must specify a customized administrator role, even if it is the same customized + * administrator role used with this stack set previously.
+ */ + AdministrationRoleARN?: string; + + /** + *The name of the IAM execution role to use to update the stack set. If you do not specify
+ * an execution role, AWS CloudFormation uses the AWSCloudFormationStackSetExecutionRole
role
+ * for the stack set operation.
Specify an IAM role only if you are using customized execution roles to control which + * stack resources users and groups can include in their stack sets. + *
+ *If you specify a customized execution role, AWS CloudFormation uses that role to update the stack. + * If you do not specify a customized execution role, AWS CloudFormation performs the update using the role + * previously associated with the stack set, so long as you have permissions to perform + * operations on the stack set.
+ */ + ExecutionRoleName?: string; + + /** + *[Service-managed
permissions] The AWS Organizations accounts in which to update associated stack instances.
To update all the stack instances associated with this stack set, do not specify DeploymentTargets
or Regions
.
If the stack set update includes changes to the template (that is, if TemplateBody
or TemplateURL
is specified), or the Parameters
, AWS CloudFormation marks all stack instances with a status of OUTDATED
prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.
Describes how the IAM roles required for stack set operations are created. You cannot modify PermissionModel
if there are stack instances associated with your stack set.
[Service-managed
permissions] Describes whether StackSets automatically deploys to AWS Organizations accounts that are added to a target organization or organizational unit (OU).
If you specify AutoDeployment
, do not specify DeploymentTargets
or Regions
.
The unique ID for this stack set operation.
*The operation ID also functions as an idempotency token, to ensure that AWS
@@ -7528,6 +7559,36 @@ export interface UpdateStackSetInput {
* instances whose status is OUTDATED
.
[Self-managed
permissions] The accounts in which to update associated stack instances. If you specify accounts, you
+ * must also specify the Regions in which to update stack set instances.
To update all the stack instances associated with this stack set,
+ * do not specify the Accounts
or Regions
properties.
If the stack set update includes changes to the template (that is, if the
+ * TemplateBody
or TemplateURL
properties are specified), or the
+ * Parameters
property, AWS CloudFormation marks all stack instances with a status of
+ * OUTDATED
prior to updating the stack instances in the specified accounts
+ * and Regions. If the stack set update does not include changes to the template or
+ * parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while
+ * leaving all other stack instances with their existing stack instance status.
The Regions in which to update associated stack instances. If you specify Regions, you + * must also specify accounts in which to update stack set instances.
+ *To update all the stack instances associated with this stack set,
+ * do not specify the Accounts
or Regions
properties.
If the stack set update includes changes to the template (that is, if the
+ * TemplateBody
or TemplateURL
properties are specified), or the
+ * Parameters
property, AWS CloudFormation marks all stack instances with a status of
+ * OUTDATED
prior to updating the stack instances in the specified accounts
+ * and Regions. If the stack set update does not include changes to the template or
+ * parameters, AWS CloudFormation updates the stack instances in the specified accounts and Regions, while
+ * leaving all other stack instances with their existing stack instance status.
Location of file containing the template body. The URL must point to a template (max - * size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to - * Template Anatomy + *
Structure containing the template body with a minimum length of 1 byte and a maximum + * length of 51,200 bytes. For more information, go to Template Anatomy * in the AWS CloudFormation User Guide.
*Conditional: You must pass TemplateURL
or TemplateBody
. If
* both are passed, only TemplateBody
is used.
Structure containing the template body with a minimum length of 1 byte and a maximum - * length of 51,200 bytes. For more information, go to Template Anatomy + *
Location of file containing the template body. The URL must point to a template (max + * size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to + * Template Anatomy * in the AWS CloudFormation User Guide.
*Conditional: You must pass TemplateURL
or TemplateBody
. If
* both are passed, only TemplateBody
is used.
User defined description associated with the parameter.
- */ - Description?: string; - /** *The default value associated with the parameter.
*/ @@ -7635,6 +7691,11 @@ export interface TemplateParameter { * UIs. */ NoEcho?: boolean; + + /** + *User defined description associated with the parameter.
+ */ + Description?: string; } export namespace TemplateParameter { @@ -7648,14 +7709,14 @@ export namespace TemplateParameter { */ export interface ValidateTemplateOutput { /** - *The description found within the template.
+ *A list of TemplateParameter
structures.
A list of the transforms that are declared in the template.
+ *The description found within the template.
*/ - DeclaredTransforms?: string[]; + Description?: string; /** *The capabilities found within the template. If your template contains IAM resources, @@ -7673,9 +7734,9 @@ export interface ValidateTemplateOutput { CapabilitiesReason?: string; /** - *
A list of TemplateParameter
structures.
A list of the transforms that are declared in the template.
*/ - Parameters?: TemplateParameter[]; + DeclaredTransforms?: string[]; } export namespace ValidateTemplateOutput { diff --git a/clients/client-cloudformation/protocols/Aws_query.ts b/clients/client-cloudformation/protocols/Aws_query.ts index bcdffd585463..b8207f0ae770 100644 --- a/clients/client-cloudformation/protocols/Aws_query.ts +++ b/clients/client-cloudformation/protocols/Aws_query.ts @@ -227,6 +227,7 @@ import { ListTypesInput, ListTypesOutput, LoggingConfig, + ModuleInfo, NameAlreadyExistsException, OperationIdAlreadyExistsException, OperationInProgressException, @@ -4640,23 +4641,23 @@ const serializeAws_queryAccountList = (input: string[], context: __SerdeContext) const serializeAws_queryAutoDeployment = (input: AutoDeployment, context: __SerdeContext): any => { const entries: any = {}; - if (input.RetainStacksOnAccountRemoval !== undefined) { - entries["RetainStacksOnAccountRemoval"] = input.RetainStacksOnAccountRemoval; - } if (input.Enabled !== undefined) { entries["Enabled"] = input.Enabled; } + if (input.RetainStacksOnAccountRemoval !== undefined) { + entries["RetainStacksOnAccountRemoval"] = input.RetainStacksOnAccountRemoval; + } return entries; }; const serializeAws_queryCancelUpdateStackInput = (input: CancelUpdateStackInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.ClientRequestToken !== undefined) { - entries["ClientRequestToken"] = input.ClientRequestToken; - } if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } + if (input.ClientRequestToken !== undefined) { + entries["ClientRequestToken"] = input.ClientRequestToken; + } return entries; }; @@ -4675,8 +4676,11 @@ const serializeAws_queryContinueUpdateRollbackInput = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.ClientRequestToken !== undefined) { - entries["ClientRequestToken"] = input.ClientRequestToken; + if (input.StackName !== undefined) { + entries["StackName"] = input.StackName; + } + if (input.RoleARN !== undefined) { + entries["RoleARN"] = input.RoleARN; } if (input.ResourcesToSkip !== undefined) { const memberEntries = serializeAws_queryResourcesToSkip(input.ResourcesToSkip, context); @@ -4685,47 +4689,26 @@ const serializeAws_queryContinueUpdateRollbackInput = ( entries[loc] = value; }); } - if (input.StackName !== undefined) { - entries["StackName"] = input.StackName; - } - if (input.RoleARN !== undefined) { - entries["RoleARN"] = input.RoleARN; + if (input.ClientRequestToken !== undefined) { + entries["ClientRequestToken"] = input.ClientRequestToken; } return entries; }; const serializeAws_queryCreateChangeSetInput = (input: CreateChangeSetInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.RollbackConfiguration !== undefined) { - const memberEntries = serializeAws_queryRollbackConfiguration(input.RollbackConfiguration, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `RollbackConfiguration.${key}`; - entries[loc] = value; - }); - } - if (input.ChangeSetName !== undefined) { - entries["ChangeSetName"] = input.ChangeSetName; + if (input.StackName !== undefined) { + entries["StackName"] = input.StackName; } - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; - entries[loc] = value; - }); + if (input.TemplateBody !== undefined) { + entries["TemplateBody"] = input.TemplateBody; } - if (input.NotificationARNs !== undefined) { 
- const memberEntries = serializeAws_queryNotificationARNs(input.NotificationARNs, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `NotificationARNs.${key}`; - entries[loc] = value; - }); + if (input.TemplateURL !== undefined) { + entries["TemplateURL"] = input.TemplateURL; } if (input.UsePreviousTemplate !== undefined) { entries["UsePreviousTemplate"] = input.UsePreviousTemplate; } - if (input.Description !== undefined) { - entries["Description"] = input.Description; - } if (input.Parameters !== undefined) { const memberEntries = serializeAws_queryParameters(input.Parameters, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4733,6 +4716,13 @@ const serializeAws_queryCreateChangeSetInput = (input: CreateChangeSetInput, con entries[loc] = value; }); } + if (input.Capabilities !== undefined) { + const memberEntries = serializeAws_queryCapabilities(input.Capabilities, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Capabilities.${key}`; + entries[loc] = value; + }); + } if (input.ResourceTypes !== undefined) { const memberEntries = serializeAws_queryResourceTypes(input.ResourceTypes, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4740,33 +4730,41 @@ const serializeAws_queryCreateChangeSetInput = (input: CreateChangeSetInput, con entries[loc] = value; }); } - if (input.ClientToken !== undefined) { - entries["ClientToken"] = input.ClientToken; - } if (input.RoleARN !== undefined) { entries["RoleARN"] = input.RoleARN; } - if (input.ChangeSetType !== undefined) { - entries["ChangeSetType"] = input.ChangeSetType; + if (input.RollbackConfiguration !== undefined) { + const memberEntries = serializeAws_queryRollbackConfiguration(input.RollbackConfiguration, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `RollbackConfiguration.${key}`; + entries[loc] = value; + }); } - if (input.StackName !== undefined) { - entries["StackName"] = input.StackName; + if (input.NotificationARNs !== undefined) { + const memberEntries = serializeAws_queryNotificationARNs(input.NotificationARNs, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `NotificationARNs.${key}`; + entries[loc] = value; + }); } - if (input.Capabilities !== undefined) { - const memberEntries = serializeAws_queryCapabilities(input.Capabilities, context); + if (input.Tags !== undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Capabilities.${key}`; + const loc = `Tags.${key}`; entries[loc] = value; }); } - if (input.TemplateBody !== undefined) { - entries["TemplateBody"] = input.TemplateBody; + if (input.ChangeSetName !== undefined) { + entries["ChangeSetName"] = input.ChangeSetName; } - if (input.TemplateURL !== undefined) { - entries["TemplateURL"] = input.TemplateURL; + if (input.ClientToken !== undefined) { + entries["ClientToken"] = input.ClientToken; } - if (input.IncludeNestedStacks !== undefined) { - entries["IncludeNestedStacks"] = input.IncludeNestedStacks; + if (input.Description !== undefined) { + entries["Description"] = input.Description; + } + if (input.ChangeSetType !== undefined) { + entries["ChangeSetType"] = input.ChangeSetType; } if (input.ResourcesToImport !== undefined) { const memberEntries = serializeAws_queryResourcesToImport(input.ResourcesToImport, context); @@ -4775,23 +4773,22 @@ const serializeAws_queryCreateChangeSetInput = (input: 
CreateChangeSetInput, con entries[loc] = value; }); } + if (input.IncludeNestedStacks !== undefined) { + entries["IncludeNestedStacks"] = input.IncludeNestedStacks; + } return entries; }; const serializeAws_queryCreateStackInput = (input: CreateStackInput, context: __SerdeContext): any => { const entries: any = {}; + if (input.StackName !== undefined) { + entries["StackName"] = input.StackName; + } if (input.TemplateBody !== undefined) { entries["TemplateBody"] = input.TemplateBody; } - if (input.StackPolicyBody !== undefined) { - entries["StackPolicyBody"] = input.StackPolicyBody; - } - if (input.ResourceTypes !== undefined) { - const memberEntries = serializeAws_queryResourceTypes(input.ResourceTypes, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `ResourceTypes.${key}`; - entries[loc] = value; - }); + if (input.TemplateURL !== undefined) { + entries["TemplateURL"] = input.TemplateURL; } if (input.Parameters !== undefined) { const memberEntries = serializeAws_queryParameters(input.Parameters, context); @@ -4800,28 +4797,9 @@ const serializeAws_queryCreateStackInput = (input: CreateStackInput, context: __ entries[loc] = value; }); } - if (input.EnableTerminationProtection !== undefined) { - entries["EnableTerminationProtection"] = input.EnableTerminationProtection; - } - if (input.OnFailure !== undefined) { - entries["OnFailure"] = input.OnFailure; - } if (input.DisableRollback !== undefined) { entries["DisableRollback"] = input.DisableRollback; } - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; - entries[loc] = value; - }); - } - if (input.StackName !== undefined) { - entries["StackName"] = input.StackName; - } - if (input.TimeoutInMinutes !== undefined) { - entries["TimeoutInMinutes"] = input.TimeoutInMinutes; - } if (input.RollbackConfiguration !== undefined) { const memberEntries = serializeAws_queryRollbackConfiguration(input.RollbackConfiguration, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4829,8 +4807,8 @@ const serializeAws_queryCreateStackInput = (input: CreateStackInput, context: __ entries[loc] = value; }); } - if (input.ClientRequestToken !== undefined) { - entries["ClientRequestToken"] = input.ClientRequestToken; + if (input.TimeoutInMinutes !== undefined) { + entries["TimeoutInMinutes"] = input.TimeoutInMinutes; } if (input.NotificationARNs !== undefined) { const memberEntries = serializeAws_queryNotificationARNs(input.NotificationARNs, context); @@ -4846,14 +4824,37 @@ const serializeAws_queryCreateStackInput = (input: CreateStackInput, context: __ entries[loc] = value; }); } - if (input.TemplateURL !== undefined) { - entries["TemplateURL"] = input.TemplateURL; + if (input.ResourceTypes !== undefined) { + const memberEntries = serializeAws_queryResourceTypes(input.ResourceTypes, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `ResourceTypes.${key}`; + entries[loc] = value; + }); + } + if (input.RoleARN !== undefined) { + entries["RoleARN"] = input.RoleARN; + } + if (input.OnFailure !== undefined) { + entries["OnFailure"] = input.OnFailure; + } + if (input.StackPolicyBody !== undefined) { + entries["StackPolicyBody"] = input.StackPolicyBody; } if (input.StackPolicyURL !== undefined) { entries["StackPolicyURL"] = input.StackPolicyURL; } - if (input.RoleARN !== undefined) { - entries["RoleARN"] = input.RoleARN; + if (input.Tags !== 
undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Tags.${key}`; + entries[loc] = value; + }); + } + if (input.ClientRequestToken !== undefined) { + entries["ClientRequestToken"] = input.ClientRequestToken; + } + if (input.EnableTerminationProtection !== undefined) { + entries["EnableTerminationProtection"] = input.EnableTerminationProtection; } return entries; }; @@ -4863,6 +4864,23 @@ const serializeAws_queryCreateStackInstancesInput = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.StackSetName !== undefined) { + entries["StackSetName"] = input.StackSetName; + } + if (input.Accounts !== undefined) { + const memberEntries = serializeAws_queryAccountList(input.Accounts, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Accounts.${key}`; + entries[loc] = value; + }); + } + if (input.DeploymentTargets !== undefined) { + const memberEntries = serializeAws_queryDeploymentTargets(input.DeploymentTargets, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `DeploymentTargets.${key}`; + entries[loc] = value; + }); + } if (input.Regions !== undefined) { const memberEntries = serializeAws_queryRegionList(input.Regions, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4884,44 +4902,26 @@ const serializeAws_queryCreateStackInstancesInput = ( entries[loc] = value; }); } - if (input.Accounts !== undefined) { - const memberEntries = serializeAws_queryAccountList(input.Accounts, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Accounts.${key}`; - entries[loc] = value; - }); - } if (input.OperationId === undefined) { input.OperationId = generateIdempotencyToken(); } if (input.OperationId !== undefined) { entries["OperationId"] = input.OperationId; } - if (input.StackSetName !== undefined) { - entries["StackSetName"] = input.StackSetName; - } - if (input.DeploymentTargets !== undefined) { - const memberEntries = serializeAws_queryDeploymentTargets(input.DeploymentTargets, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `DeploymentTargets.${key}`; - entries[loc] = value; - }); - } return entries; }; const serializeAws_queryCreateStackSetInput = (input: CreateStackSetInput, context: __SerdeContext): any => { const entries: any = {}; + if (input.StackSetName !== undefined) { + entries["StackSetName"] = input.StackSetName; + } + if (input.Description !== undefined) { + entries["Description"] = input.Description; + } if (input.TemplateBody !== undefined) { entries["TemplateBody"] = input.TemplateBody; } - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; - entries[loc] = value; - }); - } if (input.TemplateURL !== undefined) { entries["TemplateURL"] = input.TemplateURL; } @@ -4932,9 +4932,6 @@ const serializeAws_queryCreateStackSetInput = (input: CreateStackSetInput, conte entries[loc] = value; }); } - if (input.PermissionModel !== undefined) { - entries["PermissionModel"] = input.PermissionModel; - } if (input.Capabilities !== undefined) { const memberEntries = serializeAws_queryCapabilities(input.Capabilities, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4942,8 +4939,21 @@ const serializeAws_queryCreateStackSetInput = (input: CreateStackSetInput, conte 
entries[loc] = value; }); } - if (input.Description !== undefined) { - entries["Description"] = input.Description; + if (input.Tags !== undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Tags.${key}`; + entries[loc] = value; + }); + } + if (input.AdministrationRoleARN !== undefined) { + entries["AdministrationRoleARN"] = input.AdministrationRoleARN; + } + if (input.ExecutionRoleName !== undefined) { + entries["ExecutionRoleName"] = input.ExecutionRoleName; + } + if (input.PermissionModel !== undefined) { + entries["PermissionModel"] = input.PermissionModel; } if (input.AutoDeployment !== undefined) { const memberEntries = serializeAws_queryAutoDeployment(input.AutoDeployment, context); @@ -4952,37 +4962,31 @@ const serializeAws_queryCreateStackSetInput = (input: CreateStackSetInput, conte entries[loc] = value; }); } - if (input.ExecutionRoleName !== undefined) { - entries["ExecutionRoleName"] = input.ExecutionRoleName; - } if (input.ClientRequestToken === undefined) { input.ClientRequestToken = generateIdempotencyToken(); } if (input.ClientRequestToken !== undefined) { entries["ClientRequestToken"] = input.ClientRequestToken; } - if (input.AdministrationRoleARN !== undefined) { - entries["AdministrationRoleARN"] = input.AdministrationRoleARN; - } - if (input.StackSetName !== undefined) { - entries["StackSetName"] = input.StackSetName; - } return entries; }; const serializeAws_queryDeleteChangeSetInput = (input: DeleteChangeSetInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.StackName !== undefined) { - entries["StackName"] = input.StackName; - } if (input.ChangeSetName !== undefined) { entries["ChangeSetName"] = input.ChangeSetName; } + if (input.StackName !== undefined) { + entries["StackName"] = input.StackName; + } return entries; }; const serializeAws_queryDeleteStackInput = (input: DeleteStackInput, context: __SerdeContext): any => { const entries: any = {}; + if (input.StackName !== undefined) { + entries["StackName"] = input.StackName; + } if (input.RetainResources !== undefined) { const memberEntries = serializeAws_queryRetainResources(input.RetainResources, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4996,9 +5000,6 @@ const serializeAws_queryDeleteStackInput = (input: DeleteStackInput, context: __ if (input.ClientRequestToken !== undefined) { entries["ClientRequestToken"] = input.ClientRequestToken; } - if (input.StackName !== undefined) { - entries["StackName"] = input.StackName; - } return entries; }; @@ -5007,6 +5008,23 @@ const serializeAws_queryDeleteStackInstancesInput = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.StackSetName !== undefined) { + entries["StackSetName"] = input.StackSetName; + } + if (input.Accounts !== undefined) { + const memberEntries = serializeAws_queryAccountList(input.Accounts, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Accounts.${key}`; + entries[loc] = value; + }); + } + if (input.DeploymentTargets !== undefined) { + const memberEntries = serializeAws_queryDeploymentTargets(input.DeploymentTargets, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `DeploymentTargets.${key}`; + entries[loc] = value; + }); + } if (input.Regions !== undefined) { const memberEntries = serializeAws_queryRegionList(input.Regions, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ 
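// An illustrative sketch of what these query serializers produce: nested members are
// flattened into dotted keys before the request body is form-encoded. The exact member
// paths below are an assumption based on the loops above, not output captured from the SDK.
const illustrativeEntries: Record<string, string> = {
  StackSetName: "my-stack-set",
  Description: "example",
  "Tags.member.1.Key": "team",
  "Tags.member.1.Value": "platform",
  "AutoDeployment.Enabled": "true",
};
console.log(new URLSearchParams(illustrativeEntries).toString());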
-5014,12 +5032,6 @@ const serializeAws_queryDeleteStackInstancesInput = ( entries[loc] = value; }); } - if (input.OperationId === undefined) { - input.OperationId = generateIdempotencyToken(); - } - if (input.OperationId !== undefined) { - entries["OperationId"] = input.OperationId; - } if (input.OperationPreferences !== undefined) { const memberEntries = serializeAws_queryStackSetOperationPreferences(input.OperationPreferences, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5027,25 +5039,14 @@ const serializeAws_queryDeleteStackInstancesInput = ( entries[loc] = value; }); } - if (input.Accounts !== undefined) { - const memberEntries = serializeAws_queryAccountList(input.Accounts, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Accounts.${key}`; - entries[loc] = value; - }); - } if (input.RetainStacks !== undefined) { entries["RetainStacks"] = input.RetainStacks; } - if (input.StackSetName !== undefined) { - entries["StackSetName"] = input.StackSetName; + if (input.OperationId === undefined) { + input.OperationId = generateIdempotencyToken(); } - if (input.DeploymentTargets !== undefined) { - const memberEntries = serializeAws_queryDeploymentTargets(input.DeploymentTargets, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `DeploymentTargets.${key}`; - entries[loc] = value; - }); + if (input.OperationId !== undefined) { + entries["OperationId"] = input.OperationId; } return entries; }; @@ -5060,17 +5061,17 @@ const serializeAws_queryDeleteStackSetInput = (input: DeleteStackSetInput, conte const serializeAws_queryDeploymentTargets = (input: DeploymentTargets, context: __SerdeContext): any => { const entries: any = {}; - if (input.OrganizationalUnitIds !== undefined) { - const memberEntries = serializeAws_queryOrganizationalUnitIdList(input.OrganizationalUnitIds, context); + if (input.Accounts !== undefined) { + const memberEntries = serializeAws_queryAccountList(input.Accounts, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `OrganizationalUnitIds.${key}`; + const loc = `Accounts.${key}`; entries[loc] = value; }); } - if (input.Accounts !== undefined) { - const memberEntries = serializeAws_queryAccountList(input.Accounts, context); + if (input.OrganizationalUnitIds !== undefined) { + const memberEntries = serializeAws_queryOrganizationalUnitIdList(input.OrganizationalUnitIds, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Accounts.${key}`; + const loc = `OrganizationalUnitIds.${key}`; entries[loc] = value; }); } @@ -5079,8 +5080,8 @@ const serializeAws_queryDeploymentTargets = (input: DeploymentTargets, context: const serializeAws_queryDeregisterTypeInput = (input: DeregisterTypeInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.VersionId !== undefined) { - entries["VersionId"] = input.VersionId; + if (input.Arn !== undefined) { + entries["Arn"] = input.Arn; } if (input.Type !== undefined) { entries["Type"] = input.Type; @@ -5088,8 +5089,8 @@ const serializeAws_queryDeregisterTypeInput = (input: DeregisterTypeInput, conte if (input.TypeName !== undefined) { entries["TypeName"] = input.TypeName; } - if (input.Arn !== undefined) { - entries["Arn"] = input.Arn; + if (input.VersionId !== undefined) { + entries["VersionId"] = input.VersionId; } return entries; }; @@ -5110,12 +5111,12 @@ const serializeAws_queryDescribeChangeSetInput = (input: DescribeChangeSetInput, if (input.ChangeSetName !== undefined) 
{ entries["ChangeSetName"] = input.ChangeSetName; } - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; - } if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; + } return entries; }; @@ -5146,15 +5147,15 @@ const serializeAws_queryDescribeStackInstanceInput = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.StackInstanceRegion !== undefined) { - entries["StackInstanceRegion"] = input.StackInstanceRegion; - } if (input.StackSetName !== undefined) { entries["StackSetName"] = input.StackSetName; } if (input.StackInstanceAccount !== undefined) { entries["StackInstanceAccount"] = input.StackInstanceAccount; } + if (input.StackInstanceRegion !== undefined) { + entries["StackInstanceRegion"] = input.StackInstanceRegion; + } return entries; }; @@ -5166,9 +5167,6 @@ const serializeAws_queryDescribeStackResourceDriftsInput = ( if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; - } if (input.StackResourceDriftStatusFilters !== undefined) { const memberEntries = serializeAws_queryStackResourceDriftStatusFilters( input.StackResourceDriftStatusFilters, @@ -5179,6 +5177,9 @@ const serializeAws_queryDescribeStackResourceDriftsInput = ( entries[loc] = value; }); } + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; + } if (input.MaxResults !== undefined) { entries["MaxResults"] = input.MaxResults; } @@ -5204,15 +5205,15 @@ const serializeAws_queryDescribeStackResourcesInput = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.PhysicalResourceId !== undefined) { - entries["PhysicalResourceId"] = input.PhysicalResourceId; - } if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } if (input.LogicalResourceId !== undefined) { entries["LogicalResourceId"] = input.LogicalResourceId; } + if (input.PhysicalResourceId !== undefined) { + entries["PhysicalResourceId"] = input.PhysicalResourceId; + } return entries; }; @@ -5251,8 +5252,8 @@ const serializeAws_queryDescribeStacksInput = (input: DescribeStacksInput, conte const serializeAws_queryDescribeTypeInput = (input: DescribeTypeInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.VersionId !== undefined) { - entries["VersionId"] = input.VersionId; + if (input.Type !== undefined) { + entries["Type"] = input.Type; } if (input.TypeName !== undefined) { entries["TypeName"] = input.TypeName; @@ -5260,8 +5261,8 @@ const serializeAws_queryDescribeTypeInput = (input: DescribeTypeInput, context: if (input.Arn !== undefined) { entries["Arn"] = input.Arn; } - if (input.Type !== undefined) { - entries["Type"] = input.Type; + if (input.VersionId !== undefined) { + entries["VersionId"] = input.VersionId; } return entries; }; @@ -5279,6 +5280,9 @@ const serializeAws_queryDescribeTypeRegistrationInput = ( const serializeAws_queryDetectStackDriftInput = (input: DetectStackDriftInput, context: __SerdeContext): any => { const entries: any = {}; + if (input.StackName !== undefined) { + entries["StackName"] = input.StackName; + } if (input.LogicalResourceIds !== undefined) { const memberEntries = serializeAws_queryLogicalResourceIds(input.LogicalResourceIds, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5286,9 +5290,6 @@ const serializeAws_queryDetectStackDriftInput = (input: 
DetectStackDriftInput, c entries[loc] = value; }); } - if (input.StackName !== undefined) { - entries["StackName"] = input.StackName; - } return entries; }; @@ -5297,12 +5298,12 @@ const serializeAws_queryDetectStackResourceDriftInput = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.LogicalResourceId !== undefined) { - entries["LogicalResourceId"] = input.LogicalResourceId; - } if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } + if (input.LogicalResourceId !== undefined) { + entries["LogicalResourceId"] = input.LogicalResourceId; + } return entries; }; @@ -5311,12 +5312,6 @@ const serializeAws_queryDetectStackSetDriftInput = (input: DetectStackSetDriftIn if (input.StackSetName !== undefined) { entries["StackSetName"] = input.StackSetName; } - if (input.OperationId === undefined) { - input.OperationId = generateIdempotencyToken(); - } - if (input.OperationId !== undefined) { - entries["OperationId"] = input.OperationId; - } if (input.OperationPreferences !== undefined) { const memberEntries = serializeAws_queryStackSetOperationPreferences(input.OperationPreferences, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5324,6 +5319,12 @@ const serializeAws_queryDetectStackSetDriftInput = (input: DetectStackSetDriftIn entries[loc] = value; }); } + if (input.OperationId === undefined) { + input.OperationId = generateIdempotencyToken(); + } + if (input.OperationId !== undefined) { + entries["OperationId"] = input.OperationId; + } return entries; }; @@ -5332,6 +5333,12 @@ const serializeAws_queryEstimateTemplateCostInput = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.TemplateBody !== undefined) { + entries["TemplateBody"] = input.TemplateBody; + } + if (input.TemplateURL !== undefined) { + entries["TemplateURL"] = input.TemplateURL; + } if (input.Parameters !== undefined) { const memberEntries = serializeAws_queryParameters(input.Parameters, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5339,26 +5346,20 @@ const serializeAws_queryEstimateTemplateCostInput = ( entries[loc] = value; }); } - if (input.TemplateURL !== undefined) { - entries["TemplateURL"] = input.TemplateURL; - } - if (input.TemplateBody !== undefined) { - entries["TemplateBody"] = input.TemplateBody; - } return entries; }; const serializeAws_queryExecuteChangeSetInput = (input: ExecuteChangeSetInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.ClientRequestToken !== undefined) { - entries["ClientRequestToken"] = input.ClientRequestToken; - } if (input.ChangeSetName !== undefined) { entries["ChangeSetName"] = input.ChangeSetName; } if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } + if (input.ClientRequestToken !== undefined) { + entries["ClientRequestToken"] = input.ClientRequestToken; + } return entries; }; @@ -5375,29 +5376,29 @@ const serializeAws_queryGetTemplateInput = (input: GetTemplateInput, context: __ if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } - if (input.TemplateStage !== undefined) { - entries["TemplateStage"] = input.TemplateStage; - } if (input.ChangeSetName !== undefined) { entries["ChangeSetName"] = input.ChangeSetName; } + if (input.TemplateStage !== undefined) { + entries["TemplateStage"] = input.TemplateStage; + } return entries; }; const serializeAws_queryGetTemplateSummaryInput = (input: GetTemplateSummaryInput, context: __SerdeContext): any => { const entries: any = {}; - if 
(input.StackSetName !== undefined) { - entries["StackSetName"] = input.StackSetName; - } - if (input.StackName !== undefined) { - entries["StackName"] = input.StackName; - } if (input.TemplateBody !== undefined) { entries["TemplateBody"] = input.TemplateBody; } if (input.TemplateURL !== undefined) { entries["TemplateURL"] = input.TemplateURL; } + if (input.StackName !== undefined) { + entries["StackName"] = input.StackName; + } + if (input.StackSetName !== undefined) { + entries["StackSetName"] = input.StackSetName; + } return entries; }; @@ -5422,12 +5423,12 @@ const serializeAws_queryListExportsInput = (input: ListExportsInput, context: __ const serializeAws_queryListImportsInput = (input: ListImportsInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; - } if (input.ExportName !== undefined) { entries["ExportName"] = input.ExportName; } + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; + } return entries; }; @@ -5436,15 +5437,9 @@ const serializeAws_queryListStackInstancesInput = (input: ListStackInstancesInpu if (input.StackSetName !== undefined) { entries["StackSetName"] = input.StackSetName; } - if (input.StackInstanceRegion !== undefined) { - entries["StackInstanceRegion"] = input.StackInstanceRegion; - } if (input.NextToken !== undefined) { entries["NextToken"] = input.NextToken; } - if (input.StackInstanceAccount !== undefined) { - entries["StackInstanceAccount"] = input.StackInstanceAccount; - } if (input.MaxResults !== undefined) { entries["MaxResults"] = input.MaxResults; } @@ -5455,17 +5450,23 @@ const serializeAws_queryListStackInstancesInput = (input: ListStackInstancesInpu entries[loc] = value; }); } + if (input.StackInstanceAccount !== undefined) { + entries["StackInstanceAccount"] = input.StackInstanceAccount; + } + if (input.StackInstanceRegion !== undefined) { + entries["StackInstanceRegion"] = input.StackInstanceRegion; + } return entries; }; const serializeAws_queryListStackResourcesInput = (input: ListStackResourcesInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; - } if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; + } return entries; }; @@ -5474,18 +5475,18 @@ const serializeAws_queryListStackSetOperationResultsInput = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; + if (input.StackSetName !== undefined) { + entries["StackSetName"] = input.StackSetName; } if (input.OperationId !== undefined) { entries["OperationId"] = input.OperationId; } + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; + } if (input.MaxResults !== undefined) { entries["MaxResults"] = input.MaxResults; } - if (input.StackSetName !== undefined) { - entries["StackSetName"] = input.StackSetName; - } return entries; }; @@ -5494,28 +5495,28 @@ const serializeAws_queryListStackSetOperationsInput = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.StackSetName !== undefined) { + entries["StackSetName"] = input.StackSetName; + } if (input.NextToken !== undefined) { entries["NextToken"] = input.NextToken; } if (input.MaxResults !== undefined) { entries["MaxResults"] = input.MaxResults; } - if (input.StackSetName !== undefined) { 
- entries["StackSetName"] = input.StackSetName; - } return entries; }; const serializeAws_queryListStackSetsInput = (input: ListStackSetsInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.Status !== undefined) { - entries["Status"] = input.Status; + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; } if (input.MaxResults !== undefined) { entries["MaxResults"] = input.MaxResults; } - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; + if (input.Status !== undefined) { + entries["Status"] = input.Status; } return entries; }; @@ -5543,75 +5544,78 @@ const serializeAws_queryListTypeRegistrationsInput = ( if (input.Type !== undefined) { entries["Type"] = input.Type; } - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; + if (input.TypeName !== undefined) { + entries["TypeName"] = input.TypeName; } - if (input.MaxResults !== undefined) { - entries["MaxResults"] = input.MaxResults; + if (input.TypeArn !== undefined) { + entries["TypeArn"] = input.TypeArn; } if (input.RegistrationStatusFilter !== undefined) { entries["RegistrationStatusFilter"] = input.RegistrationStatusFilter; } - if (input.TypeArn !== undefined) { - entries["TypeArn"] = input.TypeArn; + if (input.MaxResults !== undefined) { + entries["MaxResults"] = input.MaxResults; } - if (input.TypeName !== undefined) { - entries["TypeName"] = input.TypeName; + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; } return entries; }; const serializeAws_queryListTypesInput = (input: ListTypesInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.DeprecatedStatus !== undefined) { - entries["DeprecatedStatus"] = input.DeprecatedStatus; + if (input.Visibility !== undefined) { + entries["Visibility"] = input.Visibility; } if (input.ProvisioningType !== undefined) { entries["ProvisioningType"] = input.ProvisioningType; } - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; + if (input.DeprecatedStatus !== undefined) { + entries["DeprecatedStatus"] = input.DeprecatedStatus; + } + if (input.Type !== undefined) { + entries["Type"] = input.Type; } if (input.MaxResults !== undefined) { entries["MaxResults"] = input.MaxResults; } - if (input.Visibility !== undefined) { - entries["Visibility"] = input.Visibility; + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; } return entries; }; const serializeAws_queryListTypeVersionsInput = (input: ListTypeVersionsInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; - } if (input.Type !== undefined) { entries["Type"] = input.Type; } - if (input.MaxResults !== undefined) { - entries["MaxResults"] = input.MaxResults; - } - if (input.DeprecatedStatus !== undefined) { - entries["DeprecatedStatus"] = input.DeprecatedStatus; - } if (input.TypeName !== undefined) { entries["TypeName"] = input.TypeName; } if (input.Arn !== undefined) { entries["Arn"] = input.Arn; } + if (input.MaxResults !== undefined) { + entries["MaxResults"] = input.MaxResults; + } + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; + } + if (input.DeprecatedStatus !== undefined) { + entries["DeprecatedStatus"] = input.DeprecatedStatus; + } return entries; }; const serializeAws_queryLoggingConfig = (input: LoggingConfig, context: __SerdeContext): any => { const entries: any = {}; - if (input.LogGroupName !== 
undefined) { - entries["LogGroupName"] = input.LogGroupName; - } if (input.LogRoleArn !== undefined) { entries["LogRoleArn"] = input.LogRoleArn; } + if (input.LogGroupName !== undefined) { + entries["LogGroupName"] = input.LogGroupName; + } return entries; }; @@ -5647,18 +5651,18 @@ const serializeAws_queryOrganizationalUnitIdList = (input: string[], context: __ const serializeAws_queryParameter = (input: Parameter, context: __SerdeContext): any => { const entries: any = {}; - if (input.UsePreviousValue !== undefined) { - entries["UsePreviousValue"] = input.UsePreviousValue; + if (input.ParameterKey !== undefined) { + entries["ParameterKey"] = input.ParameterKey; } if (input.ParameterValue !== undefined) { entries["ParameterValue"] = input.ParameterValue; } + if (input.UsePreviousValue !== undefined) { + entries["UsePreviousValue"] = input.UsePreviousValue; + } if (input.ResolvedValue !== undefined) { entries["ResolvedValue"] = input.ResolvedValue; } - if (input.ParameterKey !== undefined) { - entries["ParameterKey"] = input.ParameterKey; - } return entries; }; @@ -5680,26 +5684,26 @@ const serializeAws_queryRecordHandlerProgressInput = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.ClientRequestToken !== undefined) { - entries["ClientRequestToken"] = input.ClientRequestToken; + if (input.BearerToken !== undefined) { + entries["BearerToken"] = input.BearerToken; + } + if (input.OperationStatus !== undefined) { + entries["OperationStatus"] = input.OperationStatus; } if (input.CurrentOperationStatus !== undefined) { entries["CurrentOperationStatus"] = input.CurrentOperationStatus; } - if (input.BearerToken !== undefined) { - entries["BearerToken"] = input.BearerToken; - } if (input.StatusMessage !== undefined) { entries["StatusMessage"] = input.StatusMessage; } - if (input.ResourceModel !== undefined) { - entries["ResourceModel"] = input.ResourceModel; - } if (input.ErrorCode !== undefined) { entries["ErrorCode"] = input.ErrorCode; } - if (input.OperationStatus !== undefined) { - entries["OperationStatus"] = input.OperationStatus; + if (input.ResourceModel !== undefined) { + entries["ResourceModel"] = input.ResourceModel; + } + if (input.ClientRequestToken !== undefined) { + entries["ClientRequestToken"] = input.ClientRequestToken; } return entries; }; @@ -5716,21 +5720,15 @@ const serializeAws_queryRegionList = (input: string[], context: __SerdeContext): const serializeAws_queryRegisterTypeInput = (input: RegisterTypeInput, context: __SerdeContext): any => { const entries: any = {}; + if (input.Type !== undefined) { + entries["Type"] = input.Type; + } if (input.TypeName !== undefined) { entries["TypeName"] = input.TypeName; } if (input.SchemaHandlerPackage !== undefined) { entries["SchemaHandlerPackage"] = input.SchemaHandlerPackage; } - if (input.Type !== undefined) { - entries["Type"] = input.Type; - } - if (input.ExecutionRoleArn !== undefined) { - entries["ExecutionRoleArn"] = input.ExecutionRoleArn; - } - if (input.ClientRequestToken !== undefined) { - entries["ClientRequestToken"] = input.ClientRequestToken; - } if (input.LoggingConfig !== undefined) { const memberEntries = serializeAws_queryLoggingConfig(input.LoggingConfig, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5738,6 +5736,12 @@ const serializeAws_queryRegisterTypeInput = (input: RegisterTypeInput, context: entries[loc] = value; }); } + if (input.ExecutionRoleArn !== undefined) { + entries["ExecutionRoleArn"] = input.ExecutionRoleArn; + } + if (input.ClientRequestToken 
!== undefined) { + entries["ClientRequestToken"] = input.ClientRequestToken; + } return entries; }; @@ -5780,6 +5784,12 @@ const serializeAws_queryResourcesToSkip = (input: string[], context: __SerdeCont const serializeAws_queryResourceToImport = (input: ResourceToImport, context: __SerdeContext): any => { const entries: any = {}; + if (input.ResourceType !== undefined) { + entries["ResourceType"] = input.ResourceType; + } + if (input.LogicalResourceId !== undefined) { + entries["LogicalResourceId"] = input.LogicalResourceId; + } if (input.ResourceIdentifier !== undefined) { const memberEntries = serializeAws_queryResourceIdentifierProperties(input.ResourceIdentifier, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5787,12 +5797,6 @@ const serializeAws_queryResourceToImport = (input: ResourceToImport, context: __ entries[loc] = value; }); } - if (input.ResourceType !== undefined) { - entries["ResourceType"] = input.ResourceType; - } - if (input.LogicalResourceId !== undefined) { - entries["LogicalResourceId"] = input.LogicalResourceId; - } return entries; }; @@ -5880,12 +5884,12 @@ const serializeAws_querySetTypeDefaultVersionInput = ( if (input.Type !== undefined) { entries["Type"] = input.Type; } - if (input.VersionId !== undefined) { - entries["VersionId"] = input.VersionId; - } if (input.TypeName !== undefined) { entries["TypeName"] = input.TypeName; } + if (input.VersionId !== undefined) { + entries["VersionId"] = input.VersionId; + } return entries; }; @@ -5894,15 +5898,15 @@ const serializeAws_querySignalResourceInput = (input: SignalResourceInput, conte if (input.StackName !== undefined) { entries["StackName"] = input.StackName; } + if (input.LogicalResourceId !== undefined) { + entries["LogicalResourceId"] = input.LogicalResourceId; + } if (input.UniqueId !== undefined) { entries["UniqueId"] = input.UniqueId; } if (input.Status !== undefined) { entries["Status"] = input.Status; } - if (input.LogicalResourceId !== undefined) { - entries["LogicalResourceId"] = input.LogicalResourceId; - } return entries; }; @@ -5955,9 +5959,6 @@ const serializeAws_queryStackSetOperationPreferences = ( entries[loc] = value; }); } - if (input.MaxConcurrentPercentage !== undefined) { - entries["MaxConcurrentPercentage"] = input.MaxConcurrentPercentage; - } if (input.FailureToleranceCount !== undefined) { entries["FailureToleranceCount"] = input.FailureToleranceCount; } @@ -5967,6 +5968,9 @@ const serializeAws_queryStackSetOperationPreferences = ( if (input.MaxConcurrentCount !== undefined) { entries["MaxConcurrentCount"] = input.MaxConcurrentCount; } + if (input.MaxConcurrentPercentage !== undefined) { + entries["MaxConcurrentPercentage"] = input.MaxConcurrentPercentage; + } return entries; }; @@ -5996,12 +6000,12 @@ const serializeAws_queryStopStackSetOperationInput = ( const serializeAws_queryTag = (input: Tag, context: __SerdeContext): any => { const entries: any = {}; - if (input.Value !== undefined) { - entries["Value"] = input.Value; - } if (input.Key !== undefined) { entries["Key"] = input.Key; } + if (input.Value !== undefined) { + entries["Value"] = input.Value; + } return entries; }; @@ -6020,9 +6024,24 @@ const serializeAws_queryTags = (input: Tag[], context: __SerdeContext): any => { const serializeAws_queryUpdateStackInput = (input: UpdateStackInput, context: __SerdeContext): any => { const entries: any = {}; + if (input.StackName !== undefined) { + entries["StackName"] = input.StackName; + } + if (input.TemplateBody !== undefined) { + entries["TemplateBody"] = 
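// Editor's note (illustrative sketch, not generated code): structure-valued
// members such as LoggingConfig or ResourceIdentifier are handled by first
// serializing the nested structure and then re-keying each of its entries
// under the member name, which yields flattened query parameters like
// "LoggingConfig.LogRoleArn". A simplified, hypothetical version of that
// flattening step:
const flattenUnderPrefix = (
  prefix: string,
  memberEntries: Record<string, string>,
  entries: Record<string, string>
): void => {
  Object.entries(memberEntries).forEach(([key, value]) => {
    // e.g. prefix = "LoggingConfig", key = "LogRoleArn"
    entries[`${prefix}.${key}`] = value;
  });
};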
input.TemplateBody; + } + if (input.TemplateURL !== undefined) { + entries["TemplateURL"] = input.TemplateURL; + } + if (input.UsePreviousTemplate !== undefined) { + entries["UsePreviousTemplate"] = input.UsePreviousTemplate; + } if (input.StackPolicyDuringUpdateBody !== undefined) { entries["StackPolicyDuringUpdateBody"] = input.StackPolicyDuringUpdateBody; } + if (input.StackPolicyDuringUpdateURL !== undefined) { + entries["StackPolicyDuringUpdateURL"] = input.StackPolicyDuringUpdateURL; + } if (input.Parameters !== undefined) { const memberEntries = serializeAws_queryParameters(input.Parameters, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -6030,47 +6049,23 @@ const serializeAws_queryUpdateStackInput = (input: UpdateStackInput, context: __ entries[loc] = value; }); } - if (input.TemplateBody !== undefined) { - entries["TemplateBody"] = input.TemplateBody; - } - if (input.StackPolicyBody !== undefined) { - entries["StackPolicyBody"] = input.StackPolicyBody; - } - if (input.StackPolicyDuringUpdateURL !== undefined) { - entries["StackPolicyDuringUpdateURL"] = input.StackPolicyDuringUpdateURL; - } - if (input.StackName !== undefined) { - entries["StackName"] = input.StackName; - } - if (input.StackPolicyURL !== undefined) { - entries["StackPolicyURL"] = input.StackPolicyURL; - } - if (input.NotificationARNs !== undefined) { - const memberEntries = serializeAws_queryNotificationARNs(input.NotificationARNs, context); + if (input.Capabilities !== undefined) { + const memberEntries = serializeAws_queryCapabilities(input.Capabilities, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `NotificationARNs.${key}`; + const loc = `Capabilities.${key}`; entries[loc] = value; }); } - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); + if (input.ResourceTypes !== undefined) { + const memberEntries = serializeAws_queryResourceTypes(input.ResourceTypes, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; + const loc = `ResourceTypes.${key}`; entries[loc] = value; }); } - if (input.TemplateURL !== undefined) { - entries["TemplateURL"] = input.TemplateURL; - } - if (input.ClientRequestToken !== undefined) { - entries["ClientRequestToken"] = input.ClientRequestToken; - } if (input.RoleARN !== undefined) { entries["RoleARN"] = input.RoleARN; } - if (input.UsePreviousTemplate !== undefined) { - entries["UsePreviousTemplate"] = input.UsePreviousTemplate; - } if (input.RollbackConfiguration !== undefined) { const memberEntries = serializeAws_queryRollbackConfiguration(input.RollbackConfiguration, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -6078,20 +6073,29 @@ const serializeAws_queryUpdateStackInput = (input: UpdateStackInput, context: __ entries[loc] = value; }); } - if (input.ResourceTypes !== undefined) { - const memberEntries = serializeAws_queryResourceTypes(input.ResourceTypes, context); + if (input.StackPolicyBody !== undefined) { + entries["StackPolicyBody"] = input.StackPolicyBody; + } + if (input.StackPolicyURL !== undefined) { + entries["StackPolicyURL"] = input.StackPolicyURL; + } + if (input.NotificationARNs !== undefined) { + const memberEntries = serializeAws_queryNotificationARNs(input.NotificationARNs, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `ResourceTypes.${key}`; + const loc = `NotificationARNs.${key}`; entries[loc] = value; }); } - if (input.Capabilities !== undefined) { - 
const memberEntries = serializeAws_queryCapabilities(input.Capabilities, context); + if (input.Tags !== undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Capabilities.${key}`; + const loc = `Tags.${key}`; entries[loc] = value; }); } + if (input.ClientRequestToken !== undefined) { + entries["ClientRequestToken"] = input.ClientRequestToken; + } return entries; }; @@ -6100,6 +6104,9 @@ const serializeAws_queryUpdateStackInstancesInput = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.StackSetName !== undefined) { + entries["StackSetName"] = input.StackSetName; + } if (input.Accounts !== undefined) { const memberEntries = serializeAws_queryAccountList(input.Accounts, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -6107,15 +6114,6 @@ const serializeAws_queryUpdateStackInstancesInput = ( entries[loc] = value; }); } - if (input.OperationId === undefined) { - input.OperationId = generateIdempotencyToken(); - } - if (input.OperationId !== undefined) { - entries["OperationId"] = input.OperationId; - } - if (input.StackSetName !== undefined) { - entries["StackSetName"] = input.StackSetName; - } if (input.DeploymentTargets !== undefined) { const memberEntries = serializeAws_queryDeploymentTargets(input.DeploymentTargets, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -6144,35 +6142,36 @@ const serializeAws_queryUpdateStackInstancesInput = ( entries[loc] = value; }); } + if (input.OperationId === undefined) { + input.OperationId = generateIdempotencyToken(); + } + if (input.OperationId !== undefined) { + entries["OperationId"] = input.OperationId; + } return entries; }; const serializeAws_queryUpdateStackSetInput = (input: UpdateStackSetInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.Parameters !== undefined) { - const memberEntries = serializeAws_queryParameters(input.Parameters, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Parameters.${key}`; - entries[loc] = value; - }); + if (input.StackSetName !== undefined) { + entries["StackSetName"] = input.StackSetName; } - if (input.UsePreviousTemplate !== undefined) { - entries["UsePreviousTemplate"] = input.UsePreviousTemplate; + if (input.Description !== undefined) { + entries["Description"] = input.Description; } if (input.TemplateBody !== undefined) { entries["TemplateBody"] = input.TemplateBody; } - if (input.DeploymentTargets !== undefined) { - const memberEntries = serializeAws_queryDeploymentTargets(input.DeploymentTargets, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `DeploymentTargets.${key}`; - entries[loc] = value; - }); + if (input.TemplateURL !== undefined) { + entries["TemplateURL"] = input.TemplateURL; } - if (input.Regions !== undefined) { - const memberEntries = serializeAws_queryRegionList(input.Regions, context); + if (input.UsePreviousTemplate !== undefined) { + entries["UsePreviousTemplate"] = input.UsePreviousTemplate; + } + if (input.Parameters !== undefined) { + const memberEntries = serializeAws_queryParameters(input.Parameters, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Regions.${key}`; + const loc = `Parameters.${key}`; entries[loc] = value; }); } @@ -6183,58 +6182,63 @@ const serializeAws_queryUpdateStackSetInput = (input: UpdateStackSetInput, conte entries[loc] = value; }); } - if (input.Description !== 
undefined) { - entries["Description"] = input.Description; + if (input.Tags !== undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Tags.${key}`; + entries[loc] = value; + }); } - if (input.AutoDeployment !== undefined) { - const memberEntries = serializeAws_queryAutoDeployment(input.AutoDeployment, context); + if (input.OperationPreferences !== undefined) { + const memberEntries = serializeAws_queryStackSetOperationPreferences(input.OperationPreferences, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `AutoDeployment.${key}`; + const loc = `OperationPreferences.${key}`; entries[loc] = value; }); } if (input.AdministrationRoleARN !== undefined) { entries["AdministrationRoleARN"] = input.AdministrationRoleARN; } - if (input.TemplateURL !== undefined) { - entries["TemplateURL"] = input.TemplateURL; - } if (input.ExecutionRoleName !== undefined) { entries["ExecutionRoleName"] = input.ExecutionRoleName; } - if (input.Accounts !== undefined) { - const memberEntries = serializeAws_queryAccountList(input.Accounts, context); + if (input.DeploymentTargets !== undefined) { + const memberEntries = serializeAws_queryDeploymentTargets(input.DeploymentTargets, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Accounts.${key}`; + const loc = `DeploymentTargets.${key}`; entries[loc] = value; }); } - if (input.OperationPreferences !== undefined) { - const memberEntries = serializeAws_queryStackSetOperationPreferences(input.OperationPreferences, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `OperationPreferences.${key}`; - entries[loc] = value; - }); + if (input.PermissionModel !== undefined) { + entries["PermissionModel"] = input.PermissionModel; } - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); + if (input.AutoDeployment !== undefined) { + const memberEntries = serializeAws_queryAutoDeployment(input.AutoDeployment, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; + const loc = `AutoDeployment.${key}`; entries[loc] = value; }); } - if (input.StackSetName !== undefined) { - entries["StackSetName"] = input.StackSetName; - } - if (input.PermissionModel !== undefined) { - entries["PermissionModel"] = input.PermissionModel; - } if (input.OperationId === undefined) { input.OperationId = generateIdempotencyToken(); } if (input.OperationId !== undefined) { entries["OperationId"] = input.OperationId; } + if (input.Accounts !== undefined) { + const memberEntries = serializeAws_queryAccountList(input.Accounts, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Accounts.${key}`; + entries[loc] = value; + }); + } + if (input.Regions !== undefined) { + const memberEntries = serializeAws_queryRegionList(input.Regions, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Regions.${key}`; + entries[loc] = value; + }); + } return entries; }; @@ -6254,26 +6258,26 @@ const serializeAws_queryUpdateTerminationProtectionInput = ( const serializeAws_queryValidateTemplateInput = (input: ValidateTemplateInput, context: __SerdeContext): any => { const entries: any = {}; - if (input.TemplateURL !== undefined) { - entries["TemplateURL"] = input.TemplateURL; - } if (input.TemplateBody !== undefined) { entries["TemplateBody"] = input.TemplateBody; } + if 
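// Editor's note (illustrative, not generated code): UpdateStackInstancesInput
// and UpdateStackSetInput both default OperationId before serializing it — if
// the caller did not supply one, generateIdempotencyToken() fills it in so
// the service can deduplicate retries of the same logical operation. The diff
// only moves this block within the function; the behaviour is unchanged. A
// hedged sketch of the idea, with a hypothetical input shape and a stand-in
// token generator:
interface OperationInputSketch {
  OperationId?: string;
}

const ensureOperationId = (input: OperationInputSketch): string => {
  if (input.OperationId === undefined) {
    // Stand-in for the SDK's generateIdempotencyToken(); any unique string works here.
    input.OperationId = `op-${Date.now()}-${Math.random().toString(16).slice(2)}`;
  }
  return input.OperationId;
};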
(input.TemplateURL !== undefined) { + entries["TemplateURL"] = input.TemplateURL; + } return entries; }; const deserializeAws_queryAccountGateResult = (output: any, context: __SerdeContext): AccountGateResult => { let contents: any = { - StatusReason: undefined, Status: undefined, + StatusReason: undefined, }; - if (output["StatusReason"] !== undefined) { - contents.StatusReason = output["StatusReason"]; - } if (output["Status"] !== undefined) { contents.Status = output["Status"]; } + if (output["StatusReason"] !== undefined) { + contents.StatusReason = output["StatusReason"]; + } return contents; }; @@ -6315,15 +6319,15 @@ const deserializeAws_queryAlreadyExistsException = (output: any, context: __Serd const deserializeAws_queryAutoDeployment = (output: any, context: __SerdeContext): AutoDeployment => { let contents: any = { - RetainStacksOnAccountRemoval: undefined, Enabled: undefined, + RetainStacksOnAccountRemoval: undefined, }; - if (output["RetainStacksOnAccountRemoval"] !== undefined) { - contents.RetainStacksOnAccountRemoval = output["RetainStacksOnAccountRemoval"] == "true"; - } if (output["Enabled"] !== undefined) { contents.Enabled = output["Enabled"] == "true"; } + if (output["RetainStacksOnAccountRemoval"] !== undefined) { + contents.RetainStacksOnAccountRemoval = output["RetainStacksOnAccountRemoval"] == "true"; + } return contents; }; @@ -6378,55 +6382,55 @@ const deserializeAws_queryChangeSetSummaries = (output: any, context: __SerdeCon const deserializeAws_queryChangeSetSummary = (output: any, context: __SerdeContext): ChangeSetSummary => { let contents: any = { - Description: undefined, - Status: undefined, - ChangeSetName: undefined, - StackName: undefined, - CreationTime: undefined, StackId: undefined, + StackName: undefined, + ChangeSetId: undefined, + ChangeSetName: undefined, + ExecutionStatus: undefined, + Status: undefined, StatusReason: undefined, + CreationTime: undefined, + Description: undefined, + IncludeNestedStacks: undefined, ParentChangeSetId: undefined, - ExecutionStatus: undefined, - ChangeSetId: undefined, RootChangeSetId: undefined, - IncludeNestedStacks: undefined, }; - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; + if (output["StackId"] !== undefined) { + contents.StackId = output["StackId"]; } - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; + if (output["StackName"] !== undefined) { + contents.StackName = output["StackName"]; + } + if (output["ChangeSetId"] !== undefined) { + contents.ChangeSetId = output["ChangeSetId"]; } if (output["ChangeSetName"] !== undefined) { contents.ChangeSetName = output["ChangeSetName"]; } - if (output["StackName"] !== undefined) { - contents.StackName = output["StackName"]; + if (output["ExecutionStatus"] !== undefined) { + contents.ExecutionStatus = output["ExecutionStatus"]; + } + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; + } + if (output["StatusReason"] !== undefined) { + contents.StatusReason = output["StatusReason"]; } if (output["CreationTime"] !== undefined) { contents.CreationTime = new Date(output["CreationTime"]); } - if (output["StackId"] !== undefined) { - contents.StackId = output["StackId"]; + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; } - if (output["StatusReason"] !== undefined) { - contents.StatusReason = output["StatusReason"]; + if (output["IncludeNestedStacks"] !== undefined) { + contents.IncludeNestedStacks = output["IncludeNestedStacks"] == 
"true"; } if (output["ParentChangeSetId"] !== undefined) { contents.ParentChangeSetId = output["ParentChangeSetId"]; } - if (output["ExecutionStatus"] !== undefined) { - contents.ExecutionStatus = output["ExecutionStatus"]; - } - if (output["ChangeSetId"] !== undefined) { - contents.ChangeSetId = output["ChangeSetId"]; - } if (output["RootChangeSetId"] !== undefined) { contents.RootChangeSetId = output["RootChangeSetId"]; } - if (output["IncludeNestedStacks"] !== undefined) { - contents.IncludeNestedStacks = output["IncludeNestedStacks"] == "true"; - } return contents; }; @@ -6440,15 +6444,15 @@ const deserializeAws_queryContinueUpdateRollbackOutput = ( const deserializeAws_queryCreateChangeSetOutput = (output: any, context: __SerdeContext): CreateChangeSetOutput => { let contents: any = { - StackId: undefined, Id: undefined, + StackId: undefined, }; - if (output["StackId"] !== undefined) { - contents.StackId = output["StackId"]; - } if (output["Id"] !== undefined) { contents.Id = output["Id"]; } + if (output["StackId"] !== undefined) { + contents.StackId = output["StackId"]; + } return contents; }; @@ -6523,9 +6527,15 @@ const deserializeAws_queryDeleteStackSetOutput = (output: any, context: __SerdeC const deserializeAws_queryDeploymentTargets = (output: any, context: __SerdeContext): DeploymentTargets => { let contents: any = { - OrganizationalUnitIds: undefined, Accounts: undefined, + OrganizationalUnitIds: undefined, }; + if (output.Accounts === "") { + contents.Accounts = []; + } + if (output["Accounts"] !== undefined && output["Accounts"]["member"] !== undefined) { + contents.Accounts = deserializeAws_queryAccountList(__getArrayIfSingleItem(output["Accounts"]["member"]), context); + } if (output.OrganizationalUnitIds === "") { contents.OrganizationalUnitIds = []; } @@ -6535,12 +6545,6 @@ const deserializeAws_queryDeploymentTargets = (output: any, context: __SerdeCont context ); } - if (output.Accounts === "") { - contents.Accounts = []; - } - if (output["Accounts"] !== undefined && output["Accounts"]["member"] !== undefined) { - contents.Accounts = deserializeAws_queryAccountList(__getArrayIfSingleItem(output["Accounts"]["member"]), context); - } return contents; }; @@ -6554,12 +6558,9 @@ const deserializeAws_queryDescribeAccountLimitsOutput = ( context: __SerdeContext ): DescribeAccountLimitsOutput => { let contents: any = { - NextToken: undefined, AccountLimits: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.AccountLimits === "") { contents.AccountLimits = []; } @@ -6569,43 +6570,70 @@ const deserializeAws_queryDescribeAccountLimitsOutput = ( context ); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; const deserializeAws_queryDescribeChangeSetOutput = (output: any, context: __SerdeContext): DescribeChangeSetOutput => { let contents: any = { - Changes: undefined, + ChangeSetName: undefined, + ChangeSetId: undefined, + StackId: undefined, + StackName: undefined, Description: undefined, + Parameters: undefined, + CreationTime: undefined, + ExecutionStatus: undefined, Status: undefined, + StatusReason: undefined, NotificationARNs: undefined, + RollbackConfiguration: undefined, + Capabilities: undefined, + Tags: undefined, + Changes: undefined, NextToken: undefined, IncludeNestedStacks: undefined, - Capabilities: undefined, - ChangeSetName: undefined, - Parameters: undefined, - RollbackConfiguration: undefined, - ExecutionStatus: 
undefined, ParentChangeSetId: undefined, - ChangeSetId: undefined, RootChangeSetId: undefined, - StackName: undefined, - StatusReason: undefined, - StackId: undefined, - Tags: undefined, - CreationTime: undefined, }; - if (output.Changes === "") { - contents.Changes = []; + if (output["ChangeSetName"] !== undefined) { + contents.ChangeSetName = output["ChangeSetName"]; } - if (output["Changes"] !== undefined && output["Changes"]["member"] !== undefined) { - contents.Changes = deserializeAws_queryChanges(__getArrayIfSingleItem(output["Changes"]["member"]), context); + if (output["ChangeSetId"] !== undefined) { + contents.ChangeSetId = output["ChangeSetId"]; + } + if (output["StackId"] !== undefined) { + contents.StackId = output["StackId"]; + } + if (output["StackName"] !== undefined) { + contents.StackName = output["StackName"]; } if (output["Description"] !== undefined) { contents.Description = output["Description"]; } + if (output.Parameters === "") { + contents.Parameters = []; + } + if (output["Parameters"] !== undefined && output["Parameters"]["member"] !== undefined) { + contents.Parameters = deserializeAws_queryParameters( + __getArrayIfSingleItem(output["Parameters"]["member"]), + context + ); + } + if (output["CreationTime"] !== undefined) { + contents.CreationTime = new Date(output["CreationTime"]); + } + if (output["ExecutionStatus"] !== undefined) { + contents.ExecutionStatus = output["ExecutionStatus"]; + } if (output["Status"] !== undefined) { contents.Status = output["Status"]; } + if (output["StatusReason"] !== undefined) { + contents.StatusReason = output["StatusReason"]; + } if (output.NotificationARNs === "") { contents.NotificationARNs = []; } @@ -6615,11 +6643,11 @@ const deserializeAws_queryDescribeChangeSetOutput = (output: any, context: __Ser context ); } - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } - if (output["IncludeNestedStacks"] !== undefined) { - contents.IncludeNestedStacks = output["IncludeNestedStacks"] == "true"; + if (output["RollbackConfiguration"] !== undefined) { + contents.RollbackConfiguration = deserializeAws_queryRollbackConfiguration( + output["RollbackConfiguration"], + context + ); } if (output.Capabilities === "") { contents.Capabilities = []; @@ -6630,54 +6658,30 @@ const deserializeAws_queryDescribeChangeSetOutput = (output: any, context: __Ser context ); } - if (output["ChangeSetName"] !== undefined) { - contents.ChangeSetName = output["ChangeSetName"]; + if (output.Tags === "") { + contents.Tags = []; } - if (output.Parameters === "") { - contents.Parameters = []; + if (output["Tags"] !== undefined && output["Tags"]["member"] !== undefined) { + contents.Tags = deserializeAws_queryTags(__getArrayIfSingleItem(output["Tags"]["member"]), context); } - if (output["Parameters"] !== undefined && output["Parameters"]["member"] !== undefined) { - contents.Parameters = deserializeAws_queryParameters( - __getArrayIfSingleItem(output["Parameters"]["member"]), - context - ); + if (output.Changes === "") { + contents.Changes = []; } - if (output["RollbackConfiguration"] !== undefined) { - contents.RollbackConfiguration = deserializeAws_queryRollbackConfiguration( - output["RollbackConfiguration"], - context - ); + if (output["Changes"] !== undefined && output["Changes"]["member"] !== undefined) { + contents.Changes = deserializeAws_queryChanges(__getArrayIfSingleItem(output["Changes"]["member"]), context); } - if (output["ExecutionStatus"] !== undefined) { - contents.ExecutionStatus = 
output["ExecutionStatus"]; + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } + if (output["IncludeNestedStacks"] !== undefined) { + contents.IncludeNestedStacks = output["IncludeNestedStacks"] == "true"; } if (output["ParentChangeSetId"] !== undefined) { contents.ParentChangeSetId = output["ParentChangeSetId"]; } - if (output["ChangeSetId"] !== undefined) { - contents.ChangeSetId = output["ChangeSetId"]; - } if (output["RootChangeSetId"] !== undefined) { contents.RootChangeSetId = output["RootChangeSetId"]; } - if (output["StackName"] !== undefined) { - contents.StackName = output["StackName"]; - } - if (output["StatusReason"] !== undefined) { - contents.StatusReason = output["StatusReason"]; - } - if (output["StackId"] !== undefined) { - contents.StackId = output["StackId"]; - } - if (output.Tags === "") { - contents.Tags = []; - } - if (output["Tags"] !== undefined && output["Tags"]["member"] !== undefined) { - contents.Tags = deserializeAws_queryTags(__getArrayIfSingleItem(output["Tags"]["member"]), context); - } - if (output["CreationTime"] !== undefined) { - contents.CreationTime = new Date(output["CreationTime"]); - } return contents; }; @@ -6686,34 +6690,34 @@ const deserializeAws_queryDescribeStackDriftDetectionStatusOutput = ( context: __SerdeContext ): DescribeStackDriftDetectionStatusOutput => { let contents: any = { - DriftedStackResourceCount: undefined, - DetectionStatusReason: undefined, + StackId: undefined, StackDriftDetectionId: undefined, - Timestamp: undefined, - DetectionStatus: undefined, StackDriftStatus: undefined, - StackId: undefined, + DetectionStatus: undefined, + DetectionStatusReason: undefined, + DriftedStackResourceCount: undefined, + Timestamp: undefined, }; - if (output["DriftedStackResourceCount"] !== undefined) { - contents.DriftedStackResourceCount = parseInt(output["DriftedStackResourceCount"]); - } - if (output["DetectionStatusReason"] !== undefined) { - contents.DetectionStatusReason = output["DetectionStatusReason"]; + if (output["StackId"] !== undefined) { + contents.StackId = output["StackId"]; } if (output["StackDriftDetectionId"] !== undefined) { contents.StackDriftDetectionId = output["StackDriftDetectionId"]; } - if (output["Timestamp"] !== undefined) { - contents.Timestamp = new Date(output["Timestamp"]); + if (output["StackDriftStatus"] !== undefined) { + contents.StackDriftStatus = output["StackDriftStatus"]; } if (output["DetectionStatus"] !== undefined) { contents.DetectionStatus = output["DetectionStatus"]; } - if (output["StackDriftStatus"] !== undefined) { - contents.StackDriftStatus = output["StackDriftStatus"]; + if (output["DetectionStatusReason"] !== undefined) { + contents.DetectionStatusReason = output["DetectionStatusReason"]; } - if (output["StackId"] !== undefined) { - contents.StackId = output["StackId"]; + if (output["DriftedStackResourceCount"] !== undefined) { + contents.DriftedStackResourceCount = parseInt(output["DriftedStackResourceCount"]); + } + if (output["Timestamp"] !== undefined) { + contents.Timestamp = new Date(output["Timestamp"]); } return contents; }; @@ -6723,12 +6727,9 @@ const deserializeAws_queryDescribeStackEventsOutput = ( context: __SerdeContext ): DescribeStackEventsOutput => { let contents: any = { - NextToken: undefined, StackEvents: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.StackEvents === "") { contents.StackEvents = []; } @@ -6738,6 +6739,9 @@ const 
deserializeAws_queryDescribeStackEventsOutput = ( context ); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; @@ -6834,85 +6838,85 @@ const deserializeAws_queryDescribeStackSetOutput = (output: any, context: __Serd const deserializeAws_queryDescribeStacksOutput = (output: any, context: __SerdeContext): DescribeStacksOutput => { let contents: any = { - NextToken: undefined, Stacks: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.Stacks === "") { contents.Stacks = []; } if (output["Stacks"] !== undefined && output["Stacks"]["member"] !== undefined) { contents.Stacks = deserializeAws_queryStacks(__getArrayIfSingleItem(output["Stacks"]["member"]), context); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; const deserializeAws_queryDescribeTypeOutput = (output: any, context: __SerdeContext): DescribeTypeOutput => { let contents: any = { - Schema: undefined, Arn: undefined, - DocumentationUrl: undefined, - Description: undefined, + Type: undefined, TypeName: undefined, - DeprecatedStatus: undefined, DefaultVersionId: undefined, - ProvisioningType: undefined, IsDefaultVersion: undefined, + Description: undefined, + Schema: undefined, + ProvisioningType: undefined, + DeprecatedStatus: undefined, LoggingConfig: undefined, - Visibility: undefined, - LastUpdated: undefined, - Type: undefined, ExecutionRoleArn: undefined, + Visibility: undefined, SourceUrl: undefined, + DocumentationUrl: undefined, + LastUpdated: undefined, TimeCreated: undefined, }; - if (output["Schema"] !== undefined) { - contents.Schema = output["Schema"]; - } if (output["Arn"] !== undefined) { contents.Arn = output["Arn"]; } - if (output["DocumentationUrl"] !== undefined) { - contents.DocumentationUrl = output["DocumentationUrl"]; - } - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; + if (output["Type"] !== undefined) { + contents.Type = output["Type"]; } if (output["TypeName"] !== undefined) { contents.TypeName = output["TypeName"]; } - if (output["DeprecatedStatus"] !== undefined) { - contents.DeprecatedStatus = output["DeprecatedStatus"]; - } if (output["DefaultVersionId"] !== undefined) { contents.DefaultVersionId = output["DefaultVersionId"]; } - if (output["ProvisioningType"] !== undefined) { - contents.ProvisioningType = output["ProvisioningType"]; - } if (output["IsDefaultVersion"] !== undefined) { contents.IsDefaultVersion = output["IsDefaultVersion"] == "true"; } - if (output["LoggingConfig"] !== undefined) { - contents.LoggingConfig = deserializeAws_queryLoggingConfig(output["LoggingConfig"], context); + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; } - if (output["Visibility"] !== undefined) { - contents.Visibility = output["Visibility"]; + if (output["Schema"] !== undefined) { + contents.Schema = output["Schema"]; } - if (output["LastUpdated"] !== undefined) { - contents.LastUpdated = new Date(output["LastUpdated"]); + if (output["ProvisioningType"] !== undefined) { + contents.ProvisioningType = output["ProvisioningType"]; } - if (output["Type"] !== undefined) { - contents.Type = output["Type"]; + if (output["DeprecatedStatus"] !== undefined) { + contents.DeprecatedStatus = output["DeprecatedStatus"]; + } + if (output["LoggingConfig"] !== undefined) { + contents.LoggingConfig = 
deserializeAws_queryLoggingConfig(output["LoggingConfig"], context); } if (output["ExecutionRoleArn"] !== undefined) { contents.ExecutionRoleArn = output["ExecutionRoleArn"]; } + if (output["Visibility"] !== undefined) { + contents.Visibility = output["Visibility"]; + } if (output["SourceUrl"] !== undefined) { contents.SourceUrl = output["SourceUrl"]; } + if (output["DocumentationUrl"] !== undefined) { + contents.DocumentationUrl = output["DocumentationUrl"]; + } + if (output["LastUpdated"] !== undefined) { + contents.LastUpdated = new Date(output["LastUpdated"]); + } if (output["TimeCreated"] !== undefined) { contents.TimeCreated = new Date(output["TimeCreated"]); } @@ -6924,23 +6928,23 @@ const deserializeAws_queryDescribeTypeRegistrationOutput = ( context: __SerdeContext ): DescribeTypeRegistrationOutput => { let contents: any = { - TypeVersionArn: undefined, - TypeArn: undefined, ProgressStatus: undefined, Description: undefined, + TypeArn: undefined, + TypeVersionArn: undefined, }; - if (output["TypeVersionArn"] !== undefined) { - contents.TypeVersionArn = output["TypeVersionArn"]; - } - if (output["TypeArn"] !== undefined) { - contents.TypeArn = output["TypeArn"]; - } if (output["ProgressStatus"] !== undefined) { contents.ProgressStatus = output["ProgressStatus"]; } if (output["Description"] !== undefined) { contents.Description = output["Description"]; } + if (output["TypeArn"] !== undefined) { + contents.TypeArn = output["TypeArn"]; + } + if (output["TypeVersionArn"] !== undefined) { + contents.TypeVersionArn = output["TypeVersionArn"]; + } return contents; }; @@ -7000,19 +7004,19 @@ const deserializeAws_queryExecuteChangeSetOutput = (output: any, context: __Serd const deserializeAws_queryExport = (output: any, context: __SerdeContext): Export => { let contents: any = { + ExportingStackId: undefined, Name: undefined, Value: undefined, - ExportingStackId: undefined, }; + if (output["ExportingStackId"] !== undefined) { + contents.ExportingStackId = output["ExportingStackId"]; + } if (output["Name"] !== undefined) { contents.Name = output["Name"]; } if (output["Value"] !== undefined) { contents.Value = output["Value"]; } - if (output["ExportingStackId"] !== undefined) { - contents.ExportingStackId = output["ExportingStackId"]; - } return contents; }; @@ -7055,18 +7059,27 @@ const deserializeAws_queryGetTemplateSummaryOutput = ( context: __SerdeContext ): GetTemplateSummaryOutput => { let contents: any = { - Metadata: undefined, + Parameters: undefined, + Description: undefined, Capabilities: undefined, + CapabilitiesReason: undefined, + ResourceTypes: undefined, Version: undefined, + Metadata: undefined, DeclaredTransforms: undefined, ResourceIdentifierSummaries: undefined, - CapabilitiesReason: undefined, - Parameters: undefined, - ResourceTypes: undefined, - Description: undefined, }; - if (output["Metadata"] !== undefined) { - contents.Metadata = output["Metadata"]; + if (output.Parameters === "") { + contents.Parameters = []; + } + if (output["Parameters"] !== undefined && output["Parameters"]["member"] !== undefined) { + contents.Parameters = deserializeAws_queryParameterDeclarations( + __getArrayIfSingleItem(output["Parameters"]["member"]), + context + ); + } + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; } if (output.Capabilities === "") { contents.Capabilities = []; @@ -7077,9 +7090,24 @@ const deserializeAws_queryGetTemplateSummaryOutput = ( context ); } + if (output["CapabilitiesReason"] !== undefined) { + 
contents.CapabilitiesReason = output["CapabilitiesReason"]; + } + if (output.ResourceTypes === "") { + contents.ResourceTypes = []; + } + if (output["ResourceTypes"] !== undefined && output["ResourceTypes"]["member"] !== undefined) { + contents.ResourceTypes = deserializeAws_queryResourceTypes( + __getArrayIfSingleItem(output["ResourceTypes"]["member"]), + context + ); + } if (output["Version"] !== undefined) { contents.Version = output["Version"]; } + if (output["Metadata"] !== undefined) { + contents.Metadata = output["Metadata"]; + } if (output.DeclaredTransforms === "") { contents.DeclaredTransforms = []; } @@ -7101,30 +7129,6 @@ const deserializeAws_queryGetTemplateSummaryOutput = ( context ); } - if (output["CapabilitiesReason"] !== undefined) { - contents.CapabilitiesReason = output["CapabilitiesReason"]; - } - if (output.Parameters === "") { - contents.Parameters = []; - } - if (output["Parameters"] !== undefined && output["Parameters"]["member"] !== undefined) { - contents.Parameters = deserializeAws_queryParameterDeclarations( - __getArrayIfSingleItem(output["Parameters"]["member"]), - context - ); - } - if (output.ResourceTypes === "") { - contents.ResourceTypes = []; - } - if (output["ResourceTypes"] !== undefined && output["ResourceTypes"]["member"] !== undefined) { - contents.ResourceTypes = deserializeAws_queryResourceTypes( - __getArrayIfSingleItem(output["ResourceTypes"]["member"]), - context - ); - } - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } return contents; }; @@ -7216,35 +7220,35 @@ const deserializeAws_queryListChangeSetsOutput = (output: any, context: __SerdeC const deserializeAws_queryListExportsOutput = (output: any, context: __SerdeContext): ListExportsOutput => { let contents: any = { - NextToken: undefined, Exports: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.Exports === "") { contents.Exports = []; } if (output["Exports"] !== undefined && output["Exports"]["member"] !== undefined) { contents.Exports = deserializeAws_queryExports(__getArrayIfSingleItem(output["Exports"]["member"]), context); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; const deserializeAws_queryListImportsOutput = (output: any, context: __SerdeContext): ListImportsOutput => { let contents: any = { - NextToken: undefined, Imports: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.Imports === "") { contents.Imports = []; } if (output["Imports"] !== undefined && output["Imports"]["member"] !== undefined) { contents.Imports = deserializeAws_queryImports(__getArrayIfSingleItem(output["Imports"]["member"]), context); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; @@ -7253,12 +7257,9 @@ const deserializeAws_queryListStackInstancesOutput = ( context: __SerdeContext ): ListStackInstancesOutput => { let contents: any = { - NextToken: undefined, Summaries: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.Summaries === "") { contents.Summaries = []; } @@ -7268,6 +7269,9 @@ const deserializeAws_queryListStackInstancesOutput = ( context ); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; @@ 
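// Editor's note (illustrative sketch): the list-valued members above share a
// two-step guard — an empty XML element parses to "" and is mapped to [],
// while a one-element list parses to a bare object rather than an array and
// is wrapped (the role __getArrayIfSingleItem plays in the generated code). A
// hedged, simplified version of that guard:
const getArrayIfSingleItemSketch = (value: any): any[] =>
  Array.isArray(value) ? value : [value];

const readMemberListSketch = (node: any): string[] | undefined => {
  if (node === "") {
    return []; // an empty XML element parses to the empty string
  }
  if (node !== undefined && node["member"] !== undefined) {
    return getArrayIfSingleItemSketch(node["member"]).map((entry: any) => String(entry));
  }
  return undefined; // member absent from the response
};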
-7299,12 +7303,9 @@ const deserializeAws_queryListStackSetOperationResultsOutput = ( context: __SerdeContext ): ListStackSetOperationResultsOutput => { let contents: any = { - NextToken: undefined, Summaries: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.Summaries === "") { contents.Summaries = []; } @@ -7314,6 +7315,9 @@ const deserializeAws_queryListStackSetOperationResultsOutput = ( context ); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; @@ -7322,12 +7326,9 @@ const deserializeAws_queryListStackSetOperationsOutput = ( context: __SerdeContext ): ListStackSetOperationsOutput => { let contents: any = { - NextToken: undefined, Summaries: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.Summaries === "") { contents.Summaries = []; } @@ -7337,6 +7338,9 @@ const deserializeAws_queryListStackSetOperationsOutput = ( context ); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; @@ -7385,12 +7389,9 @@ const deserializeAws_queryListTypeRegistrationsOutput = ( context: __SerdeContext ): ListTypeRegistrationsOutput => { let contents: any = { - NextToken: undefined, RegistrationTokenList: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.RegistrationTokenList === "") { contents.RegistrationTokenList = []; } @@ -7400,6 +7401,9 @@ const deserializeAws_queryListTypeRegistrationsOutput = ( context ); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; @@ -7445,15 +7449,15 @@ const deserializeAws_queryListTypeVersionsOutput = (output: any, context: __Serd const deserializeAws_queryLoggingConfig = (output: any, context: __SerdeContext): LoggingConfig => { let contents: any = { - LogGroupName: undefined, LogRoleArn: undefined, + LogGroupName: undefined, }; - if (output["LogGroupName"] !== undefined) { - contents.LogGroupName = output["LogGroupName"]; - } if (output["LogRoleArn"] !== undefined) { contents.LogRoleArn = output["LogRoleArn"]; } + if (output["LogGroupName"] !== undefined) { + contents.LogGroupName = output["LogGroupName"]; + } return contents; }; @@ -7461,6 +7465,20 @@ const deserializeAws_queryLogicalResourceIds = (output: any, context: __SerdeCon return (output || []).map((entry: any) => entry); }; +const deserializeAws_queryModuleInfo = (output: any, context: __SerdeContext): ModuleInfo => { + let contents: any = { + TypeHierarchy: undefined, + LogicalIdHierarchy: undefined, + }; + if (output["TypeHierarchy"] !== undefined) { + contents.TypeHierarchy = output["TypeHierarchy"]; + } + if (output["LogicalIdHierarchy"] !== undefined) { + contents.LogicalIdHierarchy = output["LogicalIdHierarchy"]; + } + return contents; +}; + const deserializeAws_queryNameAlreadyExistsException = ( output: any, context: __SerdeContext @@ -7537,22 +7555,22 @@ const deserializeAws_queryOrganizationalUnitIdList = (output: any, context: __Se const deserializeAws_queryOutput = (output: any, context: __SerdeContext): Output => { let contents: any = { OutputKey: undefined, + OutputValue: undefined, Description: undefined, ExportName: undefined, - OutputValue: undefined, }; if (output["OutputKey"] !== undefined) { contents.OutputKey = output["OutputKey"]; } + if 
(output["OutputValue"] !== undefined) { + contents.OutputValue = output["OutputValue"]; + } if (output["Description"] !== undefined) { contents.Description = output["Description"]; } if (output["ExportName"] !== undefined) { contents.ExportName = output["ExportName"]; } - if (output["OutputValue"] !== undefined) { - contents.OutputValue = output["OutputValue"]; - } return contents; }; @@ -7562,23 +7580,23 @@ const deserializeAws_queryOutputs = (output: any, context: __SerdeContext): Outp const deserializeAws_queryParameter = (output: any, context: __SerdeContext): Parameter => { let contents: any = { - UsePreviousValue: undefined, + ParameterKey: undefined, ParameterValue: undefined, + UsePreviousValue: undefined, ResolvedValue: undefined, - ParameterKey: undefined, }; - if (output["UsePreviousValue"] !== undefined) { - contents.UsePreviousValue = output["UsePreviousValue"] == "true"; + if (output["ParameterKey"] !== undefined) { + contents.ParameterKey = output["ParameterKey"]; } if (output["ParameterValue"] !== undefined) { contents.ParameterValue = output["ParameterValue"]; } + if (output["UsePreviousValue"] !== undefined) { + contents.UsePreviousValue = output["UsePreviousValue"] == "true"; + } if (output["ResolvedValue"] !== undefined) { contents.ResolvedValue = output["ResolvedValue"]; } - if (output["ParameterKey"] !== undefined) { - contents.ParameterKey = output["ParameterKey"]; - } return contents; }; @@ -7600,30 +7618,30 @@ const deserializeAws_queryParameterConstraints = (output: any, context: __SerdeC const deserializeAws_queryParameterDeclaration = (output: any, context: __SerdeContext): ParameterDeclaration => { let contents: any = { - ParameterConstraints: undefined, ParameterKey: undefined, DefaultValue: undefined, + ParameterType: undefined, NoEcho: undefined, Description: undefined, - ParameterType: undefined, + ParameterConstraints: undefined, }; - if (output["ParameterConstraints"] !== undefined) { - contents.ParameterConstraints = deserializeAws_queryParameterConstraints(output["ParameterConstraints"], context); - } if (output["ParameterKey"] !== undefined) { contents.ParameterKey = output["ParameterKey"]; } if (output["DefaultValue"] !== undefined) { contents.DefaultValue = output["DefaultValue"]; } + if (output["ParameterType"] !== undefined) { + contents.ParameterType = output["ParameterType"]; + } if (output["NoEcho"] !== undefined) { contents.NoEcho = output["NoEcho"] == "true"; } if (output["Description"] !== undefined) { contents.Description = output["Description"]; } - if (output["ParameterType"] !== undefined) { - contents.ParameterType = output["ParameterType"]; + if (output["ParameterConstraints"] !== undefined) { + contents.ParameterConstraints = deserializeAws_queryParameterConstraints(output["ParameterConstraints"], context); } return contents; }; @@ -7663,21 +7681,21 @@ const deserializeAws_queryPhysicalResourceIdContextKeyValuePair = ( const deserializeAws_queryPropertyDifference = (output: any, context: __SerdeContext): PropertyDifference => { let contents: any = { PropertyPath: undefined, - DifferenceType: undefined, - ActualValue: undefined, ExpectedValue: undefined, + ActualValue: undefined, + DifferenceType: undefined, }; if (output["PropertyPath"] !== undefined) { contents.PropertyPath = output["PropertyPath"]; } - if (output["DifferenceType"] !== undefined) { - contents.DifferenceType = output["DifferenceType"]; + if (output["ExpectedValue"] !== undefined) { + contents.ExpectedValue = output["ExpectedValue"]; } if (output["ActualValue"] !== 
undefined) { contents.ActualValue = output["ActualValue"]; } - if (output["ExpectedValue"] !== undefined) { - contents.ExpectedValue = output["ExpectedValue"]; + if (output["DifferenceType"] !== undefined) { + contents.DifferenceType = output["DifferenceType"]; } return contents; }; @@ -7714,26 +7732,30 @@ const deserializeAws_queryRegistrationTokenList = (output: any, context: __Serde const deserializeAws_queryResourceChange = (output: any, context: __SerdeContext): ResourceChange => { let contents: any = { - Replacement: undefined, - PhysicalResourceId: undefined, - LogicalResourceId: undefined, Action: undefined, + LogicalResourceId: undefined, + PhysicalResourceId: undefined, + ResourceType: undefined, + Replacement: undefined, Scope: undefined, Details: undefined, - ResourceType: undefined, ChangeSetId: undefined, + ModuleInfo: undefined, }; - if (output["Replacement"] !== undefined) { - contents.Replacement = output["Replacement"]; + if (output["Action"] !== undefined) { + contents.Action = output["Action"]; + } + if (output["LogicalResourceId"] !== undefined) { + contents.LogicalResourceId = output["LogicalResourceId"]; } if (output["PhysicalResourceId"] !== undefined) { contents.PhysicalResourceId = output["PhysicalResourceId"]; } - if (output["LogicalResourceId"] !== undefined) { - contents.LogicalResourceId = output["LogicalResourceId"]; + if (output["ResourceType"] !== undefined) { + contents.ResourceType = output["ResourceType"]; } - if (output["Action"] !== undefined) { - contents.Action = output["Action"]; + if (output["Replacement"] !== undefined) { + contents.Replacement = output["Replacement"]; } if (output.Scope === "") { contents.Scope = []; @@ -7750,30 +7772,30 @@ const deserializeAws_queryResourceChange = (output: any, context: __SerdeContext context ); } - if (output["ResourceType"] !== undefined) { - contents.ResourceType = output["ResourceType"]; - } if (output["ChangeSetId"] !== undefined) { contents.ChangeSetId = output["ChangeSetId"]; } + if (output["ModuleInfo"] !== undefined) { + contents.ModuleInfo = deserializeAws_queryModuleInfo(output["ModuleInfo"], context); + } return contents; }; const deserializeAws_queryResourceChangeDetail = (output: any, context: __SerdeContext): ResourceChangeDetail => { let contents: any = { - ChangeSource: undefined, - Evaluation: undefined, Target: undefined, + Evaluation: undefined, + ChangeSource: undefined, CausingEntity: undefined, }; - if (output["ChangeSource"] !== undefined) { - contents.ChangeSource = output["ChangeSource"]; + if (output["Target"] !== undefined) { + contents.Target = deserializeAws_queryResourceTargetDefinition(output["Target"], context); } if (output["Evaluation"] !== undefined) { contents.Evaluation = output["Evaluation"]; } - if (output["Target"] !== undefined) { - contents.Target = deserializeAws_queryResourceTargetDefinition(output["Target"], context); + if (output["ChangeSource"] !== undefined) { + contents.ChangeSource = output["ChangeSource"]; } if (output["CausingEntity"] !== undefined) { contents.CausingEntity = output["CausingEntity"]; @@ -7802,21 +7824,12 @@ const deserializeAws_queryResourceIdentifierSummary = ( ): ResourceIdentifierSummary => { let contents: any = { ResourceType: undefined, - ResourceIdentifiers: undefined, LogicalResourceIds: undefined, + ResourceIdentifiers: undefined, }; if (output["ResourceType"] !== undefined) { contents.ResourceType = output["ResourceType"]; } - if (output.ResourceIdentifiers === "") { - contents.ResourceIdentifiers = []; - } - if 
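// Editor's note (not part of the generated diff): deserializeAws_queryModuleInfo
// is newly added in this change set, and ResourceChange gains a ModuleInfo
// member populated through it. This appears to correspond to CloudFormation's
// module support, where TypeHierarchy and LogicalIdHierarchy record which
// module(s) a changed resource was declared in. A hedged sketch of the shape
// being parsed (example values are hypothetical):
interface ModuleInfoSketch {
  TypeHierarchy?: string;      // e.g. "My::Example::Module::MODULE"
  LogicalIdHierarchy?: string; // e.g. "ParentModule/ChildModule"
}

const readModuleInfoSketch = (output: any): ModuleInfoSketch => ({
  TypeHierarchy: output?.["TypeHierarchy"],
  LogicalIdHierarchy: output?.["LogicalIdHierarchy"],
});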
(output["ResourceIdentifiers"] !== undefined && output["ResourceIdentifiers"]["member"] !== undefined) { - contents.ResourceIdentifiers = deserializeAws_queryResourceIdentifiers( - __getArrayIfSingleItem(output["ResourceIdentifiers"]["member"]), - context - ); - } if (output.LogicalResourceIds === "") { contents.LogicalResourceIds = []; } @@ -7826,6 +7839,15 @@ const deserializeAws_queryResourceIdentifierSummary = ( context ); } + if (output.ResourceIdentifiers === "") { + contents.ResourceIdentifiers = []; + } + if (output["ResourceIdentifiers"] !== undefined && output["ResourceIdentifiers"]["member"] !== undefined) { + contents.ResourceIdentifiers = deserializeAws_queryResourceIdentifiers( + __getArrayIfSingleItem(output["ResourceIdentifiers"]["member"]), + context + ); + } return contents; }; @@ -7834,19 +7856,19 @@ const deserializeAws_queryResourceTargetDefinition = ( context: __SerdeContext ): ResourceTargetDefinition => { let contents: any = { - RequiresRecreation: undefined, Attribute: undefined, Name: undefined, + RequiresRecreation: undefined, }; - if (output["RequiresRecreation"] !== undefined) { - contents.RequiresRecreation = output["RequiresRecreation"]; - } if (output["Attribute"] !== undefined) { contents.Attribute = output["Attribute"]; } if (output["Name"] !== undefined) { contents.Name = output["Name"]; } + if (output["RequiresRecreation"] !== undefined) { + contents.RequiresRecreation = output["RequiresRecreation"]; + } return contents; }; @@ -7906,40 +7928,40 @@ const deserializeAws_querySetTypeDefaultVersionOutput = ( const deserializeAws_queryStack = (output: any, context: __SerdeContext): Stack => { let contents: any = { - StackName: undefined, StackId: undefined, - Outputs: undefined, - Parameters: undefined, - DriftInformation: undefined, - RoleARN: undefined, - ParentId: undefined, - NotificationARNs: undefined, + StackName: undefined, ChangeSetId: undefined, - Capabilities: undefined, - TimeoutInMinutes: undefined, + Description: undefined, + Parameters: undefined, + CreationTime: undefined, + DeletionTime: undefined, LastUpdatedTime: undefined, + RollbackConfiguration: undefined, + StackStatus: undefined, StackStatusReason: undefined, DisableRollback: undefined, + NotificationARNs: undefined, + TimeoutInMinutes: undefined, + Capabilities: undefined, + Outputs: undefined, + RoleARN: undefined, Tags: undefined, - CreationTime: undefined, - StackStatus: undefined, - Description: undefined, - DeletionTime: undefined, - RollbackConfiguration: undefined, EnableTerminationProtection: undefined, + ParentId: undefined, RootId: undefined, + DriftInformation: undefined, }; - if (output["StackName"] !== undefined) { - contents.StackName = output["StackName"]; - } if (output["StackId"] !== undefined) { contents.StackId = output["StackId"]; } - if (output.Outputs === "") { - contents.Outputs = []; + if (output["StackName"] !== undefined) { + contents.StackName = output["StackName"]; + } + if (output["ChangeSetId"] !== undefined) { + contents.ChangeSetId = output["ChangeSetId"]; } - if (output["Outputs"] !== undefined && output["Outputs"]["member"] !== undefined) { - contents.Outputs = deserializeAws_queryOutputs(__getArrayIfSingleItem(output["Outputs"]["member"]), context); + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; } if (output.Parameters === "") { contents.Parameters = []; @@ -7950,14 +7972,29 @@ const deserializeAws_queryStack = (output: any, context: __SerdeContext): Stack context ); } - if (output["DriftInformation"] !== 
undefined) { - contents.DriftInformation = deserializeAws_queryStackDriftInformation(output["DriftInformation"], context); + if (output["CreationTime"] !== undefined) { + contents.CreationTime = new Date(output["CreationTime"]); } - if (output["RoleARN"] !== undefined) { - contents.RoleARN = output["RoleARN"]; + if (output["DeletionTime"] !== undefined) { + contents.DeletionTime = new Date(output["DeletionTime"]); } - if (output["ParentId"] !== undefined) { - contents.ParentId = output["ParentId"]; + if (output["LastUpdatedTime"] !== undefined) { + contents.LastUpdatedTime = new Date(output["LastUpdatedTime"]); + } + if (output["RollbackConfiguration"] !== undefined) { + contents.RollbackConfiguration = deserializeAws_queryRollbackConfiguration( + output["RollbackConfiguration"], + context + ); + } + if (output["StackStatus"] !== undefined) { + contents.StackStatus = output["StackStatus"]; + } + if (output["StackStatusReason"] !== undefined) { + contents.StackStatusReason = output["StackStatusReason"]; + } + if (output["DisableRollback"] !== undefined) { + contents.DisableRollback = output["DisableRollback"] == "true"; } if (output.NotificationARNs === "") { contents.NotificationARNs = []; @@ -7968,8 +8005,8 @@ const deserializeAws_queryStack = (output: any, context: __SerdeContext): Stack context ); } - if (output["ChangeSetId"] !== undefined) { - contents.ChangeSetId = output["ChangeSetId"]; + if (output["TimeoutInMinutes"] !== undefined) { + contents.TimeoutInMinutes = parseInt(output["TimeoutInMinutes"]); } if (output.Capabilities === "") { contents.Capabilities = []; @@ -7980,17 +8017,14 @@ const deserializeAws_queryStack = (output: any, context: __SerdeContext): Stack context ); } - if (output["TimeoutInMinutes"] !== undefined) { - contents.TimeoutInMinutes = parseInt(output["TimeoutInMinutes"]); - } - if (output["LastUpdatedTime"] !== undefined) { - contents.LastUpdatedTime = new Date(output["LastUpdatedTime"]); + if (output.Outputs === "") { + contents.Outputs = []; } - if (output["StackStatusReason"] !== undefined) { - contents.StackStatusReason = output["StackStatusReason"]; + if (output["Outputs"] !== undefined && output["Outputs"]["member"] !== undefined) { + contents.Outputs = deserializeAws_queryOutputs(__getArrayIfSingleItem(output["Outputs"]["member"]), context); } - if (output["DisableRollback"] !== undefined) { - contents.DisableRollback = output["DisableRollback"] == "true"; + if (output["RoleARN"] !== undefined) { + contents.RoleARN = output["RoleARN"]; } if (output.Tags === "") { contents.Tags = []; @@ -7998,30 +8032,18 @@ const deserializeAws_queryStack = (output: any, context: __SerdeContext): Stack if (output["Tags"] !== undefined && output["Tags"]["member"] !== undefined) { contents.Tags = deserializeAws_queryTags(__getArrayIfSingleItem(output["Tags"]["member"]), context); } - if (output["CreationTime"] !== undefined) { - contents.CreationTime = new Date(output["CreationTime"]); - } - if (output["StackStatus"] !== undefined) { - contents.StackStatus = output["StackStatus"]; - } - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } - if (output["DeletionTime"] !== undefined) { - contents.DeletionTime = new Date(output["DeletionTime"]); - } - if (output["RollbackConfiguration"] !== undefined) { - contents.RollbackConfiguration = deserializeAws_queryRollbackConfiguration( - output["RollbackConfiguration"], - context - ); - } if (output["EnableTerminationProtection"] !== undefined) { contents.EnableTerminationProtection = 
output["EnableTerminationProtection"] == "true"; } + if (output["ParentId"] !== undefined) { + contents.ParentId = output["ParentId"]; + } if (output["RootId"] !== undefined) { contents.RootId = output["RootId"]; } + if (output["DriftInformation"] !== undefined) { + contents.DriftInformation = deserializeAws_queryStackDriftInformation(output["DriftInformation"], context); + } return contents; }; @@ -8058,50 +8080,50 @@ const deserializeAws_queryStackDriftInformationSummary = ( const deserializeAws_queryStackEvent = (output: any, context: __SerdeContext): StackEvent => { let contents: any = { - Timestamp: undefined, - ResourceStatusReason: undefined, - ClientRequestToken: undefined, - PhysicalResourceId: undefined, - ResourceProperties: undefined, + StackId: undefined, + EventId: undefined, StackName: undefined, - ResourceStatus: undefined, LogicalResourceId: undefined, + PhysicalResourceId: undefined, ResourceType: undefined, - EventId: undefined, - StackId: undefined, + Timestamp: undefined, + ResourceStatus: undefined, + ResourceStatusReason: undefined, + ResourceProperties: undefined, + ClientRequestToken: undefined, }; - if (output["Timestamp"] !== undefined) { - contents.Timestamp = new Date(output["Timestamp"]); + if (output["StackId"] !== undefined) { + contents.StackId = output["StackId"]; } - if (output["ResourceStatusReason"] !== undefined) { - contents.ResourceStatusReason = output["ResourceStatusReason"]; + if (output["EventId"] !== undefined) { + contents.EventId = output["EventId"]; } - if (output["ClientRequestToken"] !== undefined) { - contents.ClientRequestToken = output["ClientRequestToken"]; + if (output["StackName"] !== undefined) { + contents.StackName = output["StackName"]; + } + if (output["LogicalResourceId"] !== undefined) { + contents.LogicalResourceId = output["LogicalResourceId"]; } if (output["PhysicalResourceId"] !== undefined) { contents.PhysicalResourceId = output["PhysicalResourceId"]; } - if (output["ResourceProperties"] !== undefined) { - contents.ResourceProperties = output["ResourceProperties"]; + if (output["ResourceType"] !== undefined) { + contents.ResourceType = output["ResourceType"]; } - if (output["StackName"] !== undefined) { - contents.StackName = output["StackName"]; + if (output["Timestamp"] !== undefined) { + contents.Timestamp = new Date(output["Timestamp"]); } if (output["ResourceStatus"] !== undefined) { contents.ResourceStatus = output["ResourceStatus"]; } - if (output["LogicalResourceId"] !== undefined) { - contents.LogicalResourceId = output["LogicalResourceId"]; - } - if (output["ResourceType"] !== undefined) { - contents.ResourceType = output["ResourceType"]; + if (output["ResourceStatusReason"] !== undefined) { + contents.ResourceStatusReason = output["ResourceStatusReason"]; } - if (output["EventId"] !== undefined) { - contents.EventId = output["EventId"]; + if (output["ResourceProperties"] !== undefined) { + contents.ResourceProperties = output["ResourceProperties"]; } - if (output["StackId"] !== undefined) { - contents.StackId = output["StackId"]; + if (output["ClientRequestToken"] !== undefined) { + contents.ClientRequestToken = output["ClientRequestToken"]; } return contents; }; @@ -8112,18 +8134,30 @@ const deserializeAws_queryStackEvents = (output: any, context: __SerdeContext): const deserializeAws_queryStackInstance = (output: any, context: __SerdeContext): StackInstance => { let contents: any = { - ParameterOverrides: undefined, - StatusReason: undefined, + StackSetId: undefined, + Region: undefined, + Account: undefined, 
StackId: undefined, + ParameterOverrides: undefined, Status: undefined, - Region: undefined, StackInstanceStatus: undefined, + StatusReason: undefined, OrganizationalUnitId: undefined, DriftStatus: undefined, - StackSetId: undefined, - Account: undefined, LastDriftCheckTimestamp: undefined, }; + if (output["StackSetId"] !== undefined) { + contents.StackSetId = output["StackSetId"]; + } + if (output["Region"] !== undefined) { + contents.Region = output["Region"]; + } + if (output["Account"] !== undefined) { + contents.Account = output["Account"]; + } + if (output["StackId"] !== undefined) { + contents.StackId = output["StackId"]; + } if (output.ParameterOverrides === "") { contents.ParameterOverrides = []; } @@ -8133,36 +8167,24 @@ const deserializeAws_queryStackInstance = (output: any, context: __SerdeContext) context ); } - if (output["StatusReason"] !== undefined) { - contents.StatusReason = output["StatusReason"]; - } - if (output["StackId"] !== undefined) { - contents.StackId = output["StackId"]; - } if (output["Status"] !== undefined) { contents.Status = output["Status"]; } - if (output["Region"] !== undefined) { - contents.Region = output["Region"]; - } if (output["StackInstanceStatus"] !== undefined) { contents.StackInstanceStatus = deserializeAws_queryStackInstanceComprehensiveStatus( output["StackInstanceStatus"], context ); } + if (output["StatusReason"] !== undefined) { + contents.StatusReason = output["StatusReason"]; + } if (output["OrganizationalUnitId"] !== undefined) { contents.OrganizationalUnitId = output["OrganizationalUnitId"]; } if (output["DriftStatus"] !== undefined) { contents.DriftStatus = output["DriftStatus"]; } - if (output["StackSetId"] !== undefined) { - contents.StackSetId = output["StackSetId"]; - } - if (output["Account"] !== undefined) { - contents.Account = output["Account"]; - } if (output["LastDriftCheckTimestamp"] !== undefined) { contents.LastDriftCheckTimestamp = new Date(output["LastDriftCheckTimestamp"]); } @@ -8201,192 +8223,180 @@ const deserializeAws_queryStackInstanceSummaries = (output: any, context: __Serd const deserializeAws_queryStackInstanceSummary = (output: any, context: __SerdeContext): StackInstanceSummary => { let contents: any = { - OrganizationalUnitId: undefined, - Status: undefined, + StackSetId: undefined, Region: undefined, + Account: undefined, StackId: undefined, + Status: undefined, StatusReason: undefined, - DriftStatus: undefined, StackInstanceStatus: undefined, + OrganizationalUnitId: undefined, + DriftStatus: undefined, LastDriftCheckTimestamp: undefined, - Account: undefined, - StackSetId: undefined, }; - if (output["OrganizationalUnitId"] !== undefined) { - contents.OrganizationalUnitId = output["OrganizationalUnitId"]; - } - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; + if (output["StackSetId"] !== undefined) { + contents.StackSetId = output["StackSetId"]; } if (output["Region"] !== undefined) { contents.Region = output["Region"]; } + if (output["Account"] !== undefined) { + contents.Account = output["Account"]; + } if (output["StackId"] !== undefined) { contents.StackId = output["StackId"]; } + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; + } if (output["StatusReason"] !== undefined) { contents.StatusReason = output["StatusReason"]; } - if (output["DriftStatus"] !== undefined) { - contents.DriftStatus = output["DriftStatus"]; - } if (output["StackInstanceStatus"] !== undefined) { contents.StackInstanceStatus = 
deserializeAws_queryStackInstanceComprehensiveStatus( output["StackInstanceStatus"], context ); } - if (output["LastDriftCheckTimestamp"] !== undefined) { - contents.LastDriftCheckTimestamp = new Date(output["LastDriftCheckTimestamp"]); + if (output["OrganizationalUnitId"] !== undefined) { + contents.OrganizationalUnitId = output["OrganizationalUnitId"]; } - if (output["Account"] !== undefined) { - contents.Account = output["Account"]; + if (output["DriftStatus"] !== undefined) { + contents.DriftStatus = output["DriftStatus"]; } - if (output["StackSetId"] !== undefined) { - contents.StackSetId = output["StackSetId"]; + if (output["LastDriftCheckTimestamp"] !== undefined) { + contents.LastDriftCheckTimestamp = new Date(output["LastDriftCheckTimestamp"]); } return contents; }; const deserializeAws_queryStackResource = (output: any, context: __SerdeContext): StackResource => { let contents: any = { - Description: undefined, - ResourceStatusReason: undefined, - Timestamp: undefined, StackName: undefined, StackId: undefined, - ResourceStatus: undefined, LogicalResourceId: undefined, + PhysicalResourceId: undefined, ResourceType: undefined, + Timestamp: undefined, + ResourceStatus: undefined, + ResourceStatusReason: undefined, + Description: undefined, DriftInformation: undefined, - PhysicalResourceId: undefined, + ModuleInfo: undefined, }; - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } - if (output["ResourceStatusReason"] !== undefined) { - contents.ResourceStatusReason = output["ResourceStatusReason"]; - } - if (output["Timestamp"] !== undefined) { - contents.Timestamp = new Date(output["Timestamp"]); - } if (output["StackName"] !== undefined) { contents.StackName = output["StackName"]; } if (output["StackId"] !== undefined) { contents.StackId = output["StackId"]; } - if (output["ResourceStatus"] !== undefined) { - contents.ResourceStatus = output["ResourceStatus"]; - } if (output["LogicalResourceId"] !== undefined) { contents.LogicalResourceId = output["LogicalResourceId"]; } + if (output["PhysicalResourceId"] !== undefined) { + contents.PhysicalResourceId = output["PhysicalResourceId"]; + } if (output["ResourceType"] !== undefined) { contents.ResourceType = output["ResourceType"]; } + if (output["Timestamp"] !== undefined) { + contents.Timestamp = new Date(output["Timestamp"]); + } + if (output["ResourceStatus"] !== undefined) { + contents.ResourceStatus = output["ResourceStatus"]; + } + if (output["ResourceStatusReason"] !== undefined) { + contents.ResourceStatusReason = output["ResourceStatusReason"]; + } + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; + } if (output["DriftInformation"] !== undefined) { contents.DriftInformation = deserializeAws_queryStackResourceDriftInformation(output["DriftInformation"], context); } - if (output["PhysicalResourceId"] !== undefined) { - contents.PhysicalResourceId = output["PhysicalResourceId"]; + if (output["ModuleInfo"] !== undefined) { + contents.ModuleInfo = deserializeAws_queryModuleInfo(output["ModuleInfo"], context); } return contents; }; const deserializeAws_queryStackResourceDetail = (output: any, context: __SerdeContext): StackResourceDetail => { let contents: any = { - Description: undefined, - LastUpdatedTimestamp: undefined, + StackName: undefined, + StackId: undefined, + LogicalResourceId: undefined, + PhysicalResourceId: undefined, ResourceType: undefined, - ResourceStatusReason: undefined, + LastUpdatedTimestamp: undefined, ResourceStatus: 
undefined, + ResourceStatusReason: undefined, + Description: undefined, Metadata: undefined, - PhysicalResourceId: undefined, DriftInformation: undefined, - LogicalResourceId: undefined, - StackId: undefined, - StackName: undefined, + ModuleInfo: undefined, }; - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; + if (output["StackName"] !== undefined) { + contents.StackName = output["StackName"]; } - if (output["LastUpdatedTimestamp"] !== undefined) { - contents.LastUpdatedTimestamp = new Date(output["LastUpdatedTimestamp"]); + if (output["StackId"] !== undefined) { + contents.StackId = output["StackId"]; + } + if (output["LogicalResourceId"] !== undefined) { + contents.LogicalResourceId = output["LogicalResourceId"]; + } + if (output["PhysicalResourceId"] !== undefined) { + contents.PhysicalResourceId = output["PhysicalResourceId"]; } if (output["ResourceType"] !== undefined) { contents.ResourceType = output["ResourceType"]; } - if (output["ResourceStatusReason"] !== undefined) { - contents.ResourceStatusReason = output["ResourceStatusReason"]; + if (output["LastUpdatedTimestamp"] !== undefined) { + contents.LastUpdatedTimestamp = new Date(output["LastUpdatedTimestamp"]); } if (output["ResourceStatus"] !== undefined) { contents.ResourceStatus = output["ResourceStatus"]; } + if (output["ResourceStatusReason"] !== undefined) { + contents.ResourceStatusReason = output["ResourceStatusReason"]; + } + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; + } if (output["Metadata"] !== undefined) { contents.Metadata = output["Metadata"]; } - if (output["PhysicalResourceId"] !== undefined) { - contents.PhysicalResourceId = output["PhysicalResourceId"]; - } if (output["DriftInformation"] !== undefined) { contents.DriftInformation = deserializeAws_queryStackResourceDriftInformation(output["DriftInformation"], context); } - if (output["LogicalResourceId"] !== undefined) { - contents.LogicalResourceId = output["LogicalResourceId"]; - } - if (output["StackId"] !== undefined) { - contents.StackId = output["StackId"]; - } - if (output["StackName"] !== undefined) { - contents.StackName = output["StackName"]; + if (output["ModuleInfo"] !== undefined) { + contents.ModuleInfo = deserializeAws_queryModuleInfo(output["ModuleInfo"], context); } return contents; }; const deserializeAws_queryStackResourceDrift = (output: any, context: __SerdeContext): StackResourceDrift => { let contents: any = { - PropertyDifferences: undefined, - LogicalResourceId: undefined, - Timestamp: undefined, - ActualProperties: undefined, StackId: undefined, - StackResourceDriftStatus: undefined, + LogicalResourceId: undefined, PhysicalResourceId: undefined, - ExpectedProperties: undefined, PhysicalResourceIdContext: undefined, ResourceType: undefined, + ExpectedProperties: undefined, + ActualProperties: undefined, + PropertyDifferences: undefined, + StackResourceDriftStatus: undefined, + Timestamp: undefined, + ModuleInfo: undefined, }; - if (output.PropertyDifferences === "") { - contents.PropertyDifferences = []; - } - if (output["PropertyDifferences"] !== undefined && output["PropertyDifferences"]["member"] !== undefined) { - contents.PropertyDifferences = deserializeAws_queryPropertyDifferences( - __getArrayIfSingleItem(output["PropertyDifferences"]["member"]), - context - ); - } - if (output["LogicalResourceId"] !== undefined) { - contents.LogicalResourceId = output["LogicalResourceId"]; - } - if (output["Timestamp"] !== undefined) { - 
contents.Timestamp = new Date(output["Timestamp"]); - } - if (output["ActualProperties"] !== undefined) { - contents.ActualProperties = output["ActualProperties"]; - } if (output["StackId"] !== undefined) { contents.StackId = output["StackId"]; } - if (output["StackResourceDriftStatus"] !== undefined) { - contents.StackResourceDriftStatus = output["StackResourceDriftStatus"]; + if (output["LogicalResourceId"] !== undefined) { + contents.LogicalResourceId = output["LogicalResourceId"]; } if (output["PhysicalResourceId"] !== undefined) { contents.PhysicalResourceId = output["PhysicalResourceId"]; } - if (output["ExpectedProperties"] !== undefined) { - contents.ExpectedProperties = output["ExpectedProperties"]; - } if (output.PhysicalResourceIdContext === "") { contents.PhysicalResourceIdContext = []; } @@ -8402,6 +8412,30 @@ const deserializeAws_queryStackResourceDrift = (output: any, context: __SerdeCon if (output["ResourceType"] !== undefined) { contents.ResourceType = output["ResourceType"]; } + if (output["ExpectedProperties"] !== undefined) { + contents.ExpectedProperties = output["ExpectedProperties"]; + } + if (output["ActualProperties"] !== undefined) { + contents.ActualProperties = output["ActualProperties"]; + } + if (output.PropertyDifferences === "") { + contents.PropertyDifferences = []; + } + if (output["PropertyDifferences"] !== undefined && output["PropertyDifferences"]["member"] !== undefined) { + contents.PropertyDifferences = deserializeAws_queryPropertyDifferences( + __getArrayIfSingleItem(output["PropertyDifferences"]["member"]), + context + ); + } + if (output["StackResourceDriftStatus"] !== undefined) { + contents.StackResourceDriftStatus = output["StackResourceDriftStatus"]; + } + if (output["Timestamp"] !== undefined) { + contents.Timestamp = new Date(output["Timestamp"]); + } + if (output["ModuleInfo"] !== undefined) { + contents.ModuleInfo = deserializeAws_queryModuleInfo(output["ModuleInfo"], context); + } return contents; }; @@ -8410,15 +8444,15 @@ const deserializeAws_queryStackResourceDriftInformation = ( context: __SerdeContext ): StackResourceDriftInformation => { let contents: any = { - LastCheckTimestamp: undefined, StackResourceDriftStatus: undefined, + LastCheckTimestamp: undefined, }; - if (output["LastCheckTimestamp"] !== undefined) { - contents.LastCheckTimestamp = new Date(output["LastCheckTimestamp"]); - } if (output["StackResourceDriftStatus"] !== undefined) { contents.StackResourceDriftStatus = output["StackResourceDriftStatus"]; } + if (output["LastCheckTimestamp"] !== undefined) { + contents.LastCheckTimestamp = new Date(output["LastCheckTimestamp"]); + } return contents; }; @@ -8453,17 +8487,24 @@ const deserializeAws_queryStackResourceSummaries = (output: any, context: __Serd const deserializeAws_queryStackResourceSummary = (output: any, context: __SerdeContext): StackResourceSummary => { let contents: any = { + LogicalResourceId: undefined, PhysicalResourceId: undefined, + ResourceType: undefined, LastUpdatedTimestamp: undefined, ResourceStatus: undefined, ResourceStatusReason: undefined, - ResourceType: undefined, - LogicalResourceId: undefined, DriftInformation: undefined, + ModuleInfo: undefined, }; + if (output["LogicalResourceId"] !== undefined) { + contents.LogicalResourceId = output["LogicalResourceId"]; + } if (output["PhysicalResourceId"] !== undefined) { contents.PhysicalResourceId = output["PhysicalResourceId"]; } + if (output["ResourceType"] !== undefined) { + contents.ResourceType = output["ResourceType"]; + } if 
(output["LastUpdatedTimestamp"] !== undefined) { contents.LastUpdatedTimestamp = new Date(output["LastUpdatedTimestamp"]); } @@ -8473,18 +8514,15 @@ const deserializeAws_queryStackResourceSummary = (output: any, context: __SerdeC if (output["ResourceStatusReason"] !== undefined) { contents.ResourceStatusReason = output["ResourceStatusReason"]; } - if (output["ResourceType"] !== undefined) { - contents.ResourceType = output["ResourceType"]; - } - if (output["LogicalResourceId"] !== undefined) { - contents.LogicalResourceId = output["LogicalResourceId"]; - } if (output["DriftInformation"] !== undefined) { contents.DriftInformation = deserializeAws_queryStackResourceDriftInformationSummary( output["DriftInformation"], context ); } + if (output["ModuleInfo"] !== undefined) { + contents.ModuleInfo = deserializeAws_queryModuleInfo(output["ModuleInfo"], context); + } return contents; }; @@ -8494,31 +8532,37 @@ const deserializeAws_queryStacks = (output: any, context: __SerdeContext): Stack const deserializeAws_queryStackSet = (output: any, context: __SerdeContext): StackSet => { let contents: any = { - Description: undefined, StackSetName: undefined, + StackSetId: undefined, + Description: undefined, Status: undefined, + TemplateBody: undefined, Parameters: undefined, - StackSetARN: undefined, Capabilities: undefined, + Tags: undefined, + StackSetARN: undefined, AdministrationRoleARN: undefined, - StackSetId: undefined, ExecutionRoleName: undefined, + StackSetDriftDetectionDetails: undefined, AutoDeployment: undefined, - OrganizationalUnitIds: undefined, PermissionModel: undefined, - TemplateBody: undefined, - Tags: undefined, - StackSetDriftDetectionDetails: undefined, + OrganizationalUnitIds: undefined, }; - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } if (output["StackSetName"] !== undefined) { contents.StackSetName = output["StackSetName"]; } + if (output["StackSetId"] !== undefined) { + contents.StackSetId = output["StackSetId"]; + } + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; + } if (output["Status"] !== undefined) { contents.Status = output["Status"]; } + if (output["TemplateBody"] !== undefined) { + contents.TemplateBody = output["TemplateBody"]; + } if (output.Parameters === "") { contents.Parameters = []; } @@ -8528,9 +8572,6 @@ const deserializeAws_queryStackSet = (output: any, context: __SerdeContext): Sta context ); } - if (output["StackSetARN"] !== undefined) { - contents.StackSetARN = output["StackSetARN"]; - } if (output.Capabilities === "") { contents.Capabilities = []; } @@ -8540,18 +8581,33 @@ const deserializeAws_queryStackSet = (output: any, context: __SerdeContext): Sta context ); } + if (output.Tags === "") { + contents.Tags = []; + } + if (output["Tags"] !== undefined && output["Tags"]["member"] !== undefined) { + contents.Tags = deserializeAws_queryTags(__getArrayIfSingleItem(output["Tags"]["member"]), context); + } + if (output["StackSetARN"] !== undefined) { + contents.StackSetARN = output["StackSetARN"]; + } if (output["AdministrationRoleARN"] !== undefined) { contents.AdministrationRoleARN = output["AdministrationRoleARN"]; } - if (output["StackSetId"] !== undefined) { - contents.StackSetId = output["StackSetId"]; - } if (output["ExecutionRoleName"] !== undefined) { contents.ExecutionRoleName = output["ExecutionRoleName"]; } + if (output["StackSetDriftDetectionDetails"] !== undefined) { + contents.StackSetDriftDetectionDetails = 
deserializeAws_queryStackSetDriftDetectionDetails( + output["StackSetDriftDetectionDetails"], + context + ); + } if (output["AutoDeployment"] !== undefined) { contents.AutoDeployment = deserializeAws_queryAutoDeployment(output["AutoDeployment"], context); } + if (output["PermissionModel"] !== undefined) { + contents.PermissionModel = output["PermissionModel"]; + } if (output.OrganizationalUnitIds === "") { contents.OrganizationalUnitIds = []; } @@ -8561,24 +8617,6 @@ const deserializeAws_queryStackSet = (output: any, context: __SerdeContext): Sta context ); } - if (output["PermissionModel"] !== undefined) { - contents.PermissionModel = output["PermissionModel"]; - } - if (output["TemplateBody"] !== undefined) { - contents.TemplateBody = output["TemplateBody"]; - } - if (output.Tags === "") { - contents.Tags = []; - } - if (output["Tags"] !== undefined && output["Tags"]["member"] !== undefined) { - contents.Tags = deserializeAws_queryTags(__getArrayIfSingleItem(output["Tags"]["member"]), context); - } - if (output["StackSetDriftDetectionDetails"] !== undefined) { - contents.StackSetDriftDetectionDetails = deserializeAws_queryStackSetDriftDetectionDetails( - output["StackSetDriftDetectionDetails"], - context - ); - } return contents; }; @@ -8587,38 +8625,38 @@ const deserializeAws_queryStackSetDriftDetectionDetails = ( context: __SerdeContext ): StackSetDriftDetectionDetails => { let contents: any = { - InSyncStackInstancesCount: undefined, - FailedStackInstancesCount: undefined, - LastDriftCheckTimestamp: undefined, - InProgressStackInstancesCount: undefined, DriftStatus: undefined, - DriftedStackInstancesCount: undefined, - TotalStackInstancesCount: undefined, DriftDetectionStatus: undefined, + LastDriftCheckTimestamp: undefined, + TotalStackInstancesCount: undefined, + DriftedStackInstancesCount: undefined, + InSyncStackInstancesCount: undefined, + InProgressStackInstancesCount: undefined, + FailedStackInstancesCount: undefined, }; - if (output["InSyncStackInstancesCount"] !== undefined) { - contents.InSyncStackInstancesCount = parseInt(output["InSyncStackInstancesCount"]); + if (output["DriftStatus"] !== undefined) { + contents.DriftStatus = output["DriftStatus"]; } - if (output["FailedStackInstancesCount"] !== undefined) { - contents.FailedStackInstancesCount = parseInt(output["FailedStackInstancesCount"]); + if (output["DriftDetectionStatus"] !== undefined) { + contents.DriftDetectionStatus = output["DriftDetectionStatus"]; } if (output["LastDriftCheckTimestamp"] !== undefined) { contents.LastDriftCheckTimestamp = new Date(output["LastDriftCheckTimestamp"]); } - if (output["InProgressStackInstancesCount"] !== undefined) { - contents.InProgressStackInstancesCount = parseInt(output["InProgressStackInstancesCount"]); - } - if (output["DriftStatus"] !== undefined) { - contents.DriftStatus = output["DriftStatus"]; + if (output["TotalStackInstancesCount"] !== undefined) { + contents.TotalStackInstancesCount = parseInt(output["TotalStackInstancesCount"]); } if (output["DriftedStackInstancesCount"] !== undefined) { contents.DriftedStackInstancesCount = parseInt(output["DriftedStackInstancesCount"]); } - if (output["TotalStackInstancesCount"] !== undefined) { - contents.TotalStackInstancesCount = parseInt(output["TotalStackInstancesCount"]); + if (output["InSyncStackInstancesCount"] !== undefined) { + contents.InSyncStackInstancesCount = parseInt(output["InSyncStackInstancesCount"]); } - if (output["DriftDetectionStatus"] !== undefined) { - contents.DriftDetectionStatus = 
output["DriftDetectionStatus"]; + if (output["InProgressStackInstancesCount"] !== undefined) { + contents.InProgressStackInstancesCount = parseInt(output["InProgressStackInstancesCount"]); + } + if (output["FailedStackInstancesCount"] !== undefined) { + contents.FailedStackInstancesCount = parseInt(output["FailedStackInstancesCount"]); } return contents; }; @@ -8652,60 +8690,60 @@ const deserializeAws_queryStackSetNotFoundException = ( const deserializeAws_queryStackSetOperation = (output: any, context: __SerdeContext): StackSetOperation => { let contents: any = { OperationId: undefined, - Status: undefined, - EndTimestamp: undefined, - Action: undefined, - StackSetDriftDetectionDetails: undefined, - CreationTimestamp: undefined, - ExecutionRoleName: undefined, - AdministrationRoleARN: undefined, StackSetId: undefined, - DeploymentTargets: undefined, + Action: undefined, + Status: undefined, OperationPreferences: undefined, RetainStacks: undefined, + AdministrationRoleARN: undefined, + ExecutionRoleName: undefined, + CreationTimestamp: undefined, + EndTimestamp: undefined, + DeploymentTargets: undefined, + StackSetDriftDetectionDetails: undefined, }; if (output["OperationId"] !== undefined) { contents.OperationId = output["OperationId"]; } - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; - } - if (output["EndTimestamp"] !== undefined) { - contents.EndTimestamp = new Date(output["EndTimestamp"]); + if (output["StackSetId"] !== undefined) { + contents.StackSetId = output["StackSetId"]; } if (output["Action"] !== undefined) { contents.Action = output["Action"]; } - if (output["StackSetDriftDetectionDetails"] !== undefined) { - contents.StackSetDriftDetectionDetails = deserializeAws_queryStackSetDriftDetectionDetails( - output["StackSetDriftDetectionDetails"], + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; + } + if (output["OperationPreferences"] !== undefined) { + contents.OperationPreferences = deserializeAws_queryStackSetOperationPreferences( + output["OperationPreferences"], context ); } - if (output["CreationTimestamp"] !== undefined) { - contents.CreationTimestamp = new Date(output["CreationTimestamp"]); + if (output["RetainStacks"] !== undefined) { + contents.RetainStacks = output["RetainStacks"] == "true"; + } + if (output["AdministrationRoleARN"] !== undefined) { + contents.AdministrationRoleARN = output["AdministrationRoleARN"]; } if (output["ExecutionRoleName"] !== undefined) { contents.ExecutionRoleName = output["ExecutionRoleName"]; } - if (output["AdministrationRoleARN"] !== undefined) { - contents.AdministrationRoleARN = output["AdministrationRoleARN"]; + if (output["CreationTimestamp"] !== undefined) { + contents.CreationTimestamp = new Date(output["CreationTimestamp"]); } - if (output["StackSetId"] !== undefined) { - contents.StackSetId = output["StackSetId"]; + if (output["EndTimestamp"] !== undefined) { + contents.EndTimestamp = new Date(output["EndTimestamp"]); } if (output["DeploymentTargets"] !== undefined) { contents.DeploymentTargets = deserializeAws_queryDeploymentTargets(output["DeploymentTargets"], context); } - if (output["OperationPreferences"] !== undefined) { - contents.OperationPreferences = deserializeAws_queryStackSetOperationPreferences( - output["OperationPreferences"], + if (output["StackSetDriftDetectionDetails"] !== undefined) { + contents.StackSetDriftDetectionDetails = deserializeAws_queryStackSetDriftDetectionDetails( + output["StackSetDriftDetectionDetails"], context ); } - if 
(output["RetainStacks"] !== undefined) { - contents.RetainStacks = output["RetainStacks"] == "true"; - } return contents; }; @@ -8715,10 +8753,10 @@ const deserializeAws_queryStackSetOperationPreferences = ( ): StackSetOperationPreferences => { let contents: any = { RegionOrder: undefined, - MaxConcurrentPercentage: undefined, FailureToleranceCount: undefined, FailureTolerancePercentage: undefined, MaxConcurrentCount: undefined, + MaxConcurrentPercentage: undefined, }; if (output.RegionOrder === "") { contents.RegionOrder = []; @@ -8729,9 +8767,6 @@ const deserializeAws_queryStackSetOperationPreferences = ( context ); } - if (output["MaxConcurrentPercentage"] !== undefined) { - contents.MaxConcurrentPercentage = parseInt(output["MaxConcurrentPercentage"]); - } if (output["FailureToleranceCount"] !== undefined) { contents.FailureToleranceCount = parseInt(output["FailureToleranceCount"]); } @@ -8741,6 +8776,9 @@ const deserializeAws_queryStackSetOperationPreferences = ( if (output["MaxConcurrentCount"] !== undefined) { contents.MaxConcurrentCount = parseInt(output["MaxConcurrentCount"]); } + if (output["MaxConcurrentPercentage"] !== undefined) { + contents.MaxConcurrentPercentage = parseInt(output["MaxConcurrentPercentage"]); + } return contents; }; @@ -8756,31 +8794,31 @@ const deserializeAws_queryStackSetOperationResultSummary = ( context: __SerdeContext ): StackSetOperationResultSummary => { let contents: any = { - Status: undefined, - OrganizationalUnitId: undefined, Account: undefined, - AccountGateResult: undefined, Region: undefined, + Status: undefined, StatusReason: undefined, + AccountGateResult: undefined, + OrganizationalUnitId: undefined, }; - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; - } - if (output["OrganizationalUnitId"] !== undefined) { - contents.OrganizationalUnitId = output["OrganizationalUnitId"]; - } if (output["Account"] !== undefined) { contents.Account = output["Account"]; } - if (output["AccountGateResult"] !== undefined) { - contents.AccountGateResult = deserializeAws_queryAccountGateResult(output["AccountGateResult"], context); - } if (output["Region"] !== undefined) { contents.Region = output["Region"]; } + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; + } if (output["StatusReason"] !== undefined) { contents.StatusReason = output["StatusReason"]; } + if (output["AccountGateResult"] !== undefined) { + contents.AccountGateResult = deserializeAws_queryAccountGateResult(output["AccountGateResult"], context); + } + if (output["OrganizationalUnitId"] !== undefined) { + contents.OrganizationalUnitId = output["OrganizationalUnitId"]; + } return contents; }; @@ -8797,25 +8835,25 @@ const deserializeAws_queryStackSetOperationSummary = ( ): StackSetOperationSummary => { let contents: any = { OperationId: undefined, + Action: undefined, Status: undefined, - EndTimestamp: undefined, CreationTimestamp: undefined, - Action: undefined, + EndTimestamp: undefined, }; if (output["OperationId"] !== undefined) { contents.OperationId = output["OperationId"]; } + if (output["Action"] !== undefined) { + contents.Action = output["Action"]; + } if (output["Status"] !== undefined) { contents.Status = output["Status"]; } - if (output["EndTimestamp"] !== undefined) { - contents.EndTimestamp = new Date(output["EndTimestamp"]); - } if (output["CreationTimestamp"] !== undefined) { contents.CreationTimestamp = new Date(output["CreationTimestamp"]); } - if (output["Action"] !== undefined) { - contents.Action = output["Action"]; + if 
(output["EndTimestamp"] !== undefined) { + contents.EndTimestamp = new Date(output["EndTimestamp"]); } return contents; }; @@ -8826,39 +8864,39 @@ const deserializeAws_queryStackSetSummaries = (output: any, context: __SerdeCont const deserializeAws_queryStackSetSummary = (output: any, context: __SerdeContext): StackSetSummary => { let contents: any = { - Description: undefined, - AutoDeployment: undefined, - LastDriftCheckTimestamp: undefined, - DriftStatus: undefined, StackSetName: undefined, StackSetId: undefined, + Description: undefined, Status: undefined, + AutoDeployment: undefined, PermissionModel: undefined, + DriftStatus: undefined, + LastDriftCheckTimestamp: undefined, }; - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } - if (output["AutoDeployment"] !== undefined) { - contents.AutoDeployment = deserializeAws_queryAutoDeployment(output["AutoDeployment"], context); - } - if (output["LastDriftCheckTimestamp"] !== undefined) { - contents.LastDriftCheckTimestamp = new Date(output["LastDriftCheckTimestamp"]); - } - if (output["DriftStatus"] !== undefined) { - contents.DriftStatus = output["DriftStatus"]; - } if (output["StackSetName"] !== undefined) { contents.StackSetName = output["StackSetName"]; } if (output["StackSetId"] !== undefined) { contents.StackSetId = output["StackSetId"]; } + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; + } if (output["Status"] !== undefined) { contents.Status = output["Status"]; } + if (output["AutoDeployment"] !== undefined) { + contents.AutoDeployment = deserializeAws_queryAutoDeployment(output["AutoDeployment"], context); + } if (output["PermissionModel"] !== undefined) { contents.PermissionModel = output["PermissionModel"]; } + if (output["DriftStatus"] !== undefined) { + contents.DriftStatus = output["DriftStatus"]; + } + if (output["LastDriftCheckTimestamp"] !== undefined) { + contents.LastDriftCheckTimestamp = new Date(output["LastDriftCheckTimestamp"]); + } return contents; }; @@ -8868,23 +8906,32 @@ const deserializeAws_queryStackSummaries = (output: any, context: __SerdeContext const deserializeAws_queryStackSummary = (output: any, context: __SerdeContext): StackSummary => { let contents: any = { - DriftInformation: undefined, - StackStatusReason: undefined, + StackId: undefined, + StackName: undefined, + TemplateDescription: undefined, + CreationTime: undefined, + LastUpdatedTime: undefined, DeletionTime: undefined, StackStatus: undefined, + StackStatusReason: undefined, ParentId: undefined, RootId: undefined, - TemplateDescription: undefined, - StackId: undefined, - LastUpdatedTime: undefined, - CreationTime: undefined, - StackName: undefined, + DriftInformation: undefined, }; - if (output["DriftInformation"] !== undefined) { - contents.DriftInformation = deserializeAws_queryStackDriftInformationSummary(output["DriftInformation"], context); + if (output["StackId"] !== undefined) { + contents.StackId = output["StackId"]; } - if (output["StackStatusReason"] !== undefined) { - contents.StackStatusReason = output["StackStatusReason"]; + if (output["StackName"] !== undefined) { + contents.StackName = output["StackName"]; + } + if (output["TemplateDescription"] !== undefined) { + contents.TemplateDescription = output["TemplateDescription"]; + } + if (output["CreationTime"] !== undefined) { + contents.CreationTime = new Date(output["CreationTime"]); + } + if (output["LastUpdatedTime"] !== undefined) { + contents.LastUpdatedTime = new 
Date(output["LastUpdatedTime"]); } if (output["DeletionTime"] !== undefined) { contents.DeletionTime = new Date(output["DeletionTime"]); @@ -8892,26 +8939,17 @@ const deserializeAws_queryStackSummary = (output: any, context: __SerdeContext): if (output["StackStatus"] !== undefined) { contents.StackStatus = output["StackStatus"]; } + if (output["StackStatusReason"] !== undefined) { + contents.StackStatusReason = output["StackStatusReason"]; + } if (output["ParentId"] !== undefined) { contents.ParentId = output["ParentId"]; } if (output["RootId"] !== undefined) { contents.RootId = output["RootId"]; } - if (output["TemplateDescription"] !== undefined) { - contents.TemplateDescription = output["TemplateDescription"]; - } - if (output["StackId"] !== undefined) { - contents.StackId = output["StackId"]; - } - if (output["LastUpdatedTime"] !== undefined) { - contents.LastUpdatedTime = new Date(output["LastUpdatedTime"]); - } - if (output["CreationTime"] !== undefined) { - contents.CreationTime = new Date(output["CreationTime"]); - } - if (output["StackName"] !== undefined) { - contents.StackName = output["StackName"]; + if (output["DriftInformation"] !== undefined) { + contents.DriftInformation = deserializeAws_queryStackDriftInformationSummary(output["DriftInformation"], context); } return contents; }; @@ -8940,15 +8978,15 @@ const deserializeAws_queryStopStackSetOperationOutput = ( const deserializeAws_queryTag = (output: any, context: __SerdeContext): Tag => { let contents: any = { - Value: undefined, Key: undefined, + Value: undefined, }; - if (output["Value"] !== undefined) { - contents.Value = output["Value"]; - } if (output["Key"] !== undefined) { contents.Key = output["Key"]; } + if (output["Value"] !== undefined) { + contents.Value = output["Value"]; + } return contents; }; @@ -8959,22 +8997,22 @@ const deserializeAws_queryTags = (output: any, context: __SerdeContext): Tag[] = const deserializeAws_queryTemplateParameter = (output: any, context: __SerdeContext): TemplateParameter => { let contents: any = { ParameterKey: undefined, - Description: undefined, DefaultValue: undefined, NoEcho: undefined, + Description: undefined, }; if (output["ParameterKey"] !== undefined) { contents.ParameterKey = output["ParameterKey"]; } - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } if (output["DefaultValue"] !== undefined) { contents.DefaultValue = output["DefaultValue"]; } if (output["NoEcho"] !== undefined) { contents.NoEcho = output["NoEcho"] == "true"; } + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; + } return contents; }; @@ -9015,21 +9053,21 @@ const deserializeAws_queryTypeSummaries = (output: any, context: __SerdeContext) const deserializeAws_queryTypeSummary = (output: any, context: __SerdeContext): TypeSummary => { let contents: any = { - Description: undefined, - TypeName: undefined, Type: undefined, + TypeName: undefined, + DefaultVersionId: undefined, TypeArn: undefined, LastUpdated: undefined, - DefaultVersionId: undefined, + Description: undefined, }; - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; + if (output["Type"] !== undefined) { + contents.Type = output["Type"]; } if (output["TypeName"] !== undefined) { contents.TypeName = output["TypeName"]; } - if (output["Type"] !== undefined) { - contents.Type = output["Type"]; + if (output["DefaultVersionId"] !== undefined) { + contents.DefaultVersionId = output["DefaultVersionId"]; } if (output["TypeArn"] 
!== undefined) { contents.TypeArn = output["TypeArn"]; @@ -9037,8 +9075,8 @@ const deserializeAws_queryTypeSummary = (output: any, context: __SerdeContext): if (output["LastUpdated"] !== undefined) { contents.LastUpdated = new Date(output["LastUpdated"]); } - if (output["DefaultVersionId"] !== undefined) { - contents.DefaultVersionId = output["DefaultVersionId"]; + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; } return contents; }; @@ -9049,34 +9087,34 @@ const deserializeAws_queryTypeVersionSummaries = (output: any, context: __SerdeC const deserializeAws_queryTypeVersionSummary = (output: any, context: __SerdeContext): TypeVersionSummary => { let contents: any = { + Type: undefined, + TypeName: undefined, + VersionId: undefined, IsDefaultVersion: undefined, - Description: undefined, Arn: undefined, - TypeName: undefined, - Type: undefined, TimeCreated: undefined, - VersionId: undefined, + Description: undefined, }; + if (output["Type"] !== undefined) { + contents.Type = output["Type"]; + } + if (output["TypeName"] !== undefined) { + contents.TypeName = output["TypeName"]; + } + if (output["VersionId"] !== undefined) { + contents.VersionId = output["VersionId"]; + } if (output["IsDefaultVersion"] !== undefined) { contents.IsDefaultVersion = output["IsDefaultVersion"] == "true"; } - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } if (output["Arn"] !== undefined) { contents.Arn = output["Arn"]; } - if (output["TypeName"] !== undefined) { - contents.TypeName = output["TypeName"]; - } - if (output["Type"] !== undefined) { - contents.Type = output["Type"]; - } if (output["TimeCreated"] !== undefined) { contents.TimeCreated = new Date(output["TimeCreated"]); } - if (output["VersionId"] !== undefined) { - contents.VersionId = output["VersionId"]; + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; } return contents; }; @@ -9129,24 +9167,24 @@ const deserializeAws_queryUpdateTerminationProtectionOutput = ( const deserializeAws_queryValidateTemplateOutput = (output: any, context: __SerdeContext): ValidateTemplateOutput => { let contents: any = { + Parameters: undefined, Description: undefined, - DeclaredTransforms: undefined, Capabilities: undefined, CapabilitiesReason: undefined, - Parameters: undefined, + DeclaredTransforms: undefined, }; - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } - if (output.DeclaredTransforms === "") { - contents.DeclaredTransforms = []; + if (output.Parameters === "") { + contents.Parameters = []; } - if (output["DeclaredTransforms"] !== undefined && output["DeclaredTransforms"]["member"] !== undefined) { - contents.DeclaredTransforms = deserializeAws_queryTransformsList( - __getArrayIfSingleItem(output["DeclaredTransforms"]["member"]), + if (output["Parameters"] !== undefined && output["Parameters"]["member"] !== undefined) { + contents.Parameters = deserializeAws_queryTemplateParameters( + __getArrayIfSingleItem(output["Parameters"]["member"]), context ); } + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; + } if (output.Capabilities === "") { contents.Capabilities = []; } @@ -9159,12 +9197,12 @@ const deserializeAws_queryValidateTemplateOutput = (output: any, context: __Serd if (output["CapabilitiesReason"] !== undefined) { contents.CapabilitiesReason = output["CapabilitiesReason"]; } - if (output.Parameters === "") { - contents.Parameters = []; + 
if (output.DeclaredTransforms === "") { + contents.DeclaredTransforms = []; } - if (output["Parameters"] !== undefined && output["Parameters"]["member"] !== undefined) { - contents.Parameters = deserializeAws_queryTemplateParameters( - __getArrayIfSingleItem(output["Parameters"]["member"]), + if (output["DeclaredTransforms"] !== undefined && output["DeclaredTransforms"]["member"] !== undefined) { + contents.DeclaredTransforms = deserializeAws_queryTransformsList( + __getArrayIfSingleItem(output["DeclaredTransforms"]["member"]), context ); } diff --git a/clients/client-cloudtrail/CloudTrail.ts b/clients/client-cloudtrail/CloudTrail.ts index 4adcb2b6762f..cbbc15174a8c 100644 --- a/clients/client-cloudtrail/CloudTrail.ts +++ b/clients/client-cloudtrail/CloudTrail.ts @@ -465,7 +465,7 @@ export class CloudTrail extends CloudTrailClient { * maximum of 50 possible. The response includes a token that you can use to get the next page * of results. *The rate of lookup requests is limited to two per second per account. If this + *
The rate of lookup requests is limited to two per second, per account, per region. If this * limit is exceeded, a throttling error occurs.
*The rate of lookup requests is limited to two per second per account. If this + *
The rate of lookup requests is limited to two per second, per account, per region. If this * limit is exceeded, a throttling error occurs.
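          // Illustrative sketch only (not part of this diff): paging LookupEvents while staying under the
          // two-requests-per-second, per-account, per-region limit described above. CloudTrailClient and
          // LookupEventsCommand come from @aws-sdk/client-cloudtrail; the 600 ms spacing and the
          // "ThrottlingException" name check are assumptions, not something this change defines.
          import { CloudTrailClient, LookupEventsCommand, Event } from "@aws-sdk/client-cloudtrail";

          const cloudTrail = new CloudTrailClient({ region: "us-east-1" });
          const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));

          export async function lookupAllEvents(): Promise<Event[]> {
            const events: Event[] = [];
            let nextToken: string | undefined;
            do {
              try {
                const page = await cloudTrail.send(new LookupEventsCommand({ MaxResults: 50, NextToken: nextToken }));
                events.push(...(page.Events ?? []));
                nextToken = page.NextToken;
                await delay(600); // space successive calls out to respect the lookup rate limit
              } catch (err) {
                if ((err as Error).name === "ThrottlingException") {
                  await delay(2000); // back off, then retry the same page
                  continue;
                }
                throw err;
              }
            } while (nextToken);
            return events;
          }
          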
*The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters.
          + *<p>The key in a key-value pair. The key must be no longer than 128 Unicode characters. The key must be unique for the resource to which it applies.</p>
          
*/ - Value?: string; + Key: string | undefined; /** - *The key in a key-value pair. The key must be must be no longer than 128 Unicode characters. The key must be unique for the resource to which it applies.
+ *The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters.
*/ - Key: string | undefined; + Value?: string; } export namespace Tag { @@ -243,6 +243,33 @@ export namespace UnsupportedOperationException { }); } +export interface AdvancedFieldSelector { + Field: string | undefined; + StartsWith?: string[]; + NotStartsWith?: string[]; + EndsWith?: string[]; + Equals?: string[]; + NotEndsWith?: string[]; + NotEquals?: string[]; +} + +export namespace AdvancedFieldSelector { + export const filterSensitiveLog = (obj: AdvancedFieldSelector): any => ({ + ...obj, + }); +} + +export interface AdvancedEventSelector { + FieldSelectors: AdvancedFieldSelector[] | undefined; + Name: string | undefined; +} + +export namespace AdvancedEventSelector { + export const filterSensitiveLog = (obj: AdvancedEventSelector): any => ({ + ...obj, + }); +} + /** *This exception is thrown when trusted access has not been enabled between AWS CloudTrail and AWS Organizations. For more information, * see Enabling Trusted Access with Other AWS Services @@ -286,27 +313,26 @@ export namespace CloudWatchLogsDeliveryUnavailableException { */ export interface CreateTrailRequest { /** - *
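          // Illustrative sketch only (not part of this diff): one way to populate the new
          // AdvancedEventSelector / AdvancedFieldSelector shapes added above. The field names and values
          // ("eventCategory", "resources.type", "readOnly") follow CloudTrail's advanced event selector
          // documentation and are assumptions here, not defined by this change; such a selector would
          // typically be passed to PutEventSelectors, which is not shown in this excerpt.
          import { AdvancedEventSelector } from "@aws-sdk/client-cloudtrail";

          const s3ReadOnlyDataEvents: AdvancedEventSelector = {
            Name: "Log read-only S3 object-level events",
            FieldSelectors: [
              { Field: "eventCategory", Equals: ["Data"] },
              { Field: "resources.type", Equals: ["AWS::S3::Object"] },
              { Field: "readOnly", Equals: ["true"] },
            ],
          };
          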
Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.
- */ - SnsTopicName?: string; - - /** - *Specifies whether the trail is publishing events from global services such as IAM to the log files.
- */ - IncludeGlobalServiceEvents?: boolean; - - /** - *Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group - * to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.
- */ - CloudWatchLogsLogGroupArn?: string; - - /** - *Specifies whether the trail is created for all accounts in an organization in AWS Organizations, or only for the current AWS account. - * The default is false, and cannot be true unless the call is made on behalf of an AWS account that is the master account for an organization in - * AWS Organizations.
+ *Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The + * value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully + * specified ARN to a key, or a globally unique identifier.
+ *Examples:
+ *alias/MyAliasName
+ *arn:aws:kms:us-east-2:123456789012:alias/MyAliasName
+ *arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012
+ *12345678-1234-1234-1234-123456789012
+ *Specifies whether the trail is created in the current region or in all regions. The default is false, which creates a trail only in the region where you are signed in. As a best practice, consider @@ -314,6 +340,14 @@ export interface CreateTrailRequest { */ IsMultiRegionTrail?: boolean; + /** + *
Specifies whether log file integrity validation is enabled. The default is false.
+ *When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail.
+ *Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated * for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
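          // Illustrative sketch only (not part of this diff): a minimal CreateTrail call exercising the
          // request fields documented above (multi-region, log file validation, a KMS key given in alias
          // form, and tags). The trail, bucket, prefix, and alias names are placeholders; only the field
          // names come from CreateTrailRequest as shown in this diff.
          import { CloudTrailClient, CreateTrailCommand } from "@aws-sdk/client-cloudtrail";

          const client = new CloudTrailClient({ region: "us-east-2" });

          export async function createValidatedTrail() {
            return client.send(
              new CreateTrailCommand({
                Name: "management-events-trail",
                S3BucketName: "example-cloudtrail-logs-bucket", // placeholder bucket
                S3KeyPrefix: "org/prod",
                IsMultiRegionTrail: true,
                EnableLogFileValidation: true,
                KmsKeyId: "alias/MyAliasName", // any of the four formats listed earlier is accepted
                TagsList: [{ Key: "team", Value: "security" }],
              })
            );
          }
          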
@@ -321,12 +355,10 @@ export interface CreateTrailRequest { S3KeyPrefix?: string; /** - *Specifies whether log file integrity validation is enabled. The default is false.
- *When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail will not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. The same applies whenever you stop CloudTrail logging or delete a trail.
- *Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group + * to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.
*/ - EnableLogFileValidation?: boolean; + CloudWatchLogsLogGroupArn?: string; /** *Specifies the name of the trail. The name must meet the following requirements:
@@ -352,9 +384,14 @@ export interface CreateTrailRequest { Name: string | undefined; /** - *Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
+ *Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.
*/ - CloudWatchLogsRoleArn?: string; + S3BucketName: string | undefined; + + /** + *Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.
+ */ + SnsTopicName?: string; /** *A list of tags.
@@ -362,31 +399,21 @@ export interface CreateTrailRequest { TagsList?: Tag[]; /** - *Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The - * value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully - * specified ARN to a key, or a globally unique identifier.
- *Examples:
- *alias/MyAliasName
- *arn:aws:kms:us-east-2:123456789012:alias/MyAliasName
- *arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012
- *12345678-1234-1234-1234-123456789012
- *Specifies whether the trail is created for all accounts in an organization in AWS Organizations, or only for the current AWS account. + * The default is false, and cannot be true unless the call is made on behalf of an AWS account that is the master account for an organization in + * AWS Organizations.
*/ - KmsKeyId?: string; + IsOrganizationTrail?: boolean; /** - *Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.
+ *Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
*/ - S3BucketName: string | undefined; + CloudWatchLogsRoleArn?: string; + + /** + *Specifies whether the trail is publishing events from global services such as IAM to the log files.
+ */ + IncludeGlobalServiceEvents?: boolean; } export namespace CreateTrailRequest { @@ -399,6 +426,11 @@ export namespace CreateTrailRequest { *Returns the objects or data listed below if successful. Otherwise, returns an error.
*/ export interface CreateTrailResponse { + /** + *Specifies whether the trail is publishing events from global services such as IAM to the log files.
+ */ + IncludeGlobalServiceEvents?: boolean; + /** *Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:
*@@ -407,25 +439,15 @@ export interface CreateTrailResponse { */ KmsKeyId?: string; - /** - *
This field is no longer in use. Use SnsTopicARN.
- */ - SnsTopicName?: string; - - /** - *Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
- */ - CloudWatchLogsRoleArn?: string; - /** *Specifies whether the trail is an organization trail.
*/ IsOrganizationTrail?: boolean; /** - *Specifies the name of the trail.
+ *Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
*/ - Name?: string; + CloudWatchLogsRoleArn?: string; /** *Specifies whether the trail exists in one region or in all regions.
@@ -433,10 +455,9 @@ export interface CreateTrailResponse { IsMultiRegionTrail?: boolean; /** - *Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated - * for log file delivery. For more information, see Finding Your CloudTrail Log Files.
+ *Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.
*/ - S3KeyPrefix?: string; + CloudWatchLogsLogGroupArn?: string; /** *Specifies the ARN of the trail that was created. The format of a trail ARN @@ -448,22 +469,22 @@ export interface CreateTrailResponse { TrailARN?: string; /** - *
Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered. The format of a topic ARN is:
- *
- * arn:aws:sns:us-east-2:123456789012:MyTopic
- *
Specifies whether log file integrity validation is enabled.
*/ - SnsTopicARN?: string; + LogFileValidationEnabled?: boolean; /** - *Specifies whether the trail is publishing events from global services such as IAM to the log files.
+ *This field is no longer in use. Use SnsTopicARN.
*/ - IncludeGlobalServiceEvents?: boolean; + SnsTopicName?: string; /** - *Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.
+ *Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered. The format of a topic ARN is:
+ *
+ * arn:aws:sns:us-east-2:123456789012:MyTopic
+ *
Specifies the name of the Amazon S3 bucket designated for publishing log files.
@@ -471,9 +492,15 @@ export interface CreateTrailResponse { S3BucketName?: string; /** - *Specifies whether log file integrity validation is enabled.
+ *Specifies the name of the trail.
*/ - LogFileValidationEnabled?: boolean; + Name?: string; + + /** + *Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated + * for log file delivery. For more information, see Finding Your CloudTrail Log Files.
+ */ + S3KeyPrefix?: string; } export namespace CreateTrailResponse { @@ -719,7 +746,9 @@ export namespace KmsKeyDisabledException { } /** - *This exception is thrown when the KMS key does not exist, or when the S3 bucket and the KMS key are not in the same region.
+ *This exception is thrown when the KMS key does not exist, when the S3 bucket and the KMS key are not + * in the same region, or when the KMS key associated with the SNS topic either does not exist or is not + * in the same region.
*/ export interface KmsKeyNotFoundException extends __SmithyException, $MetadataBearer { name: "KmsKeyNotFoundException"; @@ -917,13 +946,6 @@ export namespace TrailNotFoundException { *Returns information about the trail.
*/ export interface DescribeTrailsRequest { - /** - *Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region, - * or in the case of an organization trail, the replication of an organization trail in member accounts. If you do not include shadow trails, organization trails in a member account - * and region replication trails will not be returned. The default is true.
- */ - includeShadowTrails?: boolean; - /** *Specifies a list of trail names, trail ARNs, or both, of the trails to describe. The format of a trail ARN is:
*@@ -945,6 +967,13 @@ export interface DescribeTrailsRequest { * */ trailNameList?: string[]; + + /** + *
Specifies whether to include shadow trails in the response. A shadow trail is the replication in a region of a trail that was created in a different region, + * or in the case of an organization trail, the replication of an organization trail in member accounts. If you do not include shadow trails, organization trails in a member account + * and region replication trails will not be returned. The default is true.
+ */ + includeShadowTrails?: boolean; } export namespace DescribeTrailsRequest { @@ -958,48 +987,21 @@ export namespace DescribeTrailsRequest { */ export interface Trail { /** - *Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:
- *
- * arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012
- *
The region in which the trail was created.
- */ - HomeRegion?: string; - - /** - *Specifies whether the trail is an organization trail.
- */ - IsOrganizationTrail?: boolean; - - /** - *This field is no longer in use. Use SnsTopicARN.
+ *Name of the trail set by calling CreateTrail. The maximum length is 128 + * characters.
*/ - SnsTopicName?: string; + Name?: string; /** - *Specifies if the trail has custom event selectors.
+ *Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket Naming Requirements.
*/ - HasCustomEventSelectors?: boolean; + S3BucketName?: string; /** *Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
*/ CloudWatchLogsRoleArn?: string; - /** - *Specifies whether log file validation is enabled.
- */ - LogFileValidationEnabled?: boolean; - - /** - *Specifies whether the trail exists only in one region or exists in all regions.
- */ - IsMultiRegionTrail?: boolean; - /** *Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications * when log files are delivered. The format of a topic ARN is:
@@ -1010,15 +1012,15 @@ export interface Trail { SnsTopicARN?: string; /** - *Specifies an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered.
+ *Set to True to include AWS API calls from AWS global services such as IAM. + * Otherwise, False.
*/ - CloudWatchLogsLogGroupArn?: string; + IncludeGlobalServiceEvents?: boolean; /** - *Name of the trail set by calling CreateTrail. The maximum length is 128 - * characters.
+ *Specifies whether log file validation is enabled.
*/ - Name?: string; + LogFileValidationEnabled?: boolean; /** *Specifies the ARN of the trail. The format of a trail ARN is:
@@ -1029,41 +1031,68 @@ export interface Trail { TrailARN?: string; /** - *Specifies whether a trail has insight types specified in an InsightSelector
list.
Specifies an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered.
*/ - HasInsightSelectors?: boolean; + CloudWatchLogsLogGroupArn?: string; /** - *Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket Naming Requirements.
+ *Specifies if the trail has custom event selectors.
*/ - S3BucketName?: string; + HasCustomEventSelectors?: boolean; /** - *Set to True to include AWS API calls from AWS global services such as IAM. - * Otherwise, False.
+ *Specifies whether the trail exists only in one region or exists in all regions.
*/ - IncludeGlobalServiceEvents?: boolean; + IsMultiRegionTrail?: boolean; /** *Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated * for log file delivery. For more information, see Finding Your CloudTrail Log Files.The maximum length is 200 characters.
*/ S3KeyPrefix?: string; -} - -export namespace Trail { - export const filterSensitiveLog = (obj: Trail): any => ({ - ...obj, - }); -} -/** - *Returns the objects or data listed below if successful. Otherwise, returns an error.
- */ -export interface DescribeTrailsResponse { /** - *The list of trail objects. Trail objects with string values are only returned if values for the objects exist in a trail's configuration.
- * For example, SNSTopicName
and SNSTopicARN
are only returned in results if a trail is configured to send SNS notifications. Similarly,
+ *
This field is no longer in use. Use SnsTopicARN.
+ */ + SnsTopicName?: string; + + /** + *Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:
+ *
+ * arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012
+ *
The region in which the trail was created.
+ */ + HomeRegion?: string; + + /** + *Specifies whether a trail has insight types specified in an InsightSelector
list.
Specifies whether the trail is an organization trail.
+ */ + IsOrganizationTrail?: boolean; +} + +export namespace Trail { + export const filterSensitiveLog = (obj: Trail): any => ({ + ...obj, + }); +} + +/** + *Returns the objects or data listed below if successful. Otherwise, returns an error.
+ */ +export interface DescribeTrailsResponse { + /** + *The list of trail objects. Trail objects with string values are only returned if values for the objects exist in a trail's configuration.
+ * For example, SNSTopicName
and SNSTopicARN
are only returned in results if a trail is configured to send SNS notifications. Similarly,
* KMSKeyId
only appears in results if a trail's log files are encrypted with AWS KMS-managed keys.
The resource type in which you want to log data events. You can specify AWS::S3::Object
or
+ * AWS::Lambda::Function
resources.
An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified objects.
*The resource type in which you want to log data events. You can specify AWS::S3::Object
or
- * AWS::Lambda::Function
resources.
You can configure up to five event selectors for a trail.
*/ export interface EventSelector { - /** - *An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out AWS Key Management Service events by
- * containing "kms.amazonaws.com"
. By default, ExcludeManagementEventSources
is empty, and AWS KMS events are included in events that are logged to your trail.
Specify if you want your trail to log read-only events, write-only events, or all. For example,
* the EC2 GetConsoleOutput
is a read-only API operation and
@@ -1247,12 +1270,21 @@ export interface EventSelector {
*/
ReadWriteType?: ReadWriteType | string;
+ /**
+ *
An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out AWS Key Management Service events by
+ * containing "kms.amazonaws.com"
. By default, ExcludeManagementEventSources
is empty, and AWS KMS events are included in events that are logged to your trail.
Specify if you want your event selector to include management events for your trail.
** For more information, see Management Events in the AWS CloudTrail User Guide.
* *By default, the value is true
.
The first copy of management events is free. You are charged for additional copies of management + * events that you are logging on any subsequent trail in the same region. For more information about + * CloudTrail pricing, see AWS CloudTrail Pricing.
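The EventSelector fields documented above are applied through PutEventSelectors; a minimal sketch, with the trail name and bucket ARN as placeholders:

```ts
import { CloudTrailClient, PutEventSelectorsCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-2" });

export async function configureEventSelectors(): Promise<void> {
  await client.send(
    new PutEventSelectorsCommand({
      TrailName: "example-trail", // placeholder
      EventSelectors: [
        {
          ReadWriteType: "All",
          IncludeManagementEvents: true,
          // Filter out AWS KMS management events, as described above.
          ExcludeManagementEventSources: ["kms.amazonaws.com"],
          DataResources: [
            // Log S3 object-level data events for one bucket (placeholder ARN).
            { Type: "AWS::S3::Object", Values: ["arn:aws:s3:::example-bucket/"] },
          ],
        },
      ],
    })
  );
}
```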
*/ IncludeManagementEvents?: boolean; @@ -1273,15 +1305,16 @@ export namespace EventSelector { } export interface GetEventSelectorsResponse { + AdvancedEventSelectors?: AdvancedEventSelector[]; /** - *The specified trail ARN that has the event selectors.
+ *The event selectors that are configured for the trail.
*/ - TrailARN?: string; + EventSelectors?: EventSelector[]; /** - *The event selectors that are configured for the trail.
+ *The specified trail ARN that has the event selectors.
*/ - EventSelectors?: EventSelector[]; + TrailARN?: string; } export namespace GetEventSelectorsResponse { @@ -1348,14 +1381,14 @@ export namespace InsightSelector { export interface GetInsightSelectorsResponse { /** - *A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight
is supported as an insight type.
The Amazon Resource Name (ARN) of a trail for which you want to get Insights selectors.
*/ - InsightSelectors?: InsightSelector[]; + TrailARN?: string; /** - *The Amazon Resource Name (ARN) of a trail for which you want to get Insights selectors.
+ *A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight
is supported as an insight type.
Displays any Amazon SNS error that CloudTrail encountered when attempting to send a - * notification. For more information about Amazon SNS errors, see the Amazon SNS Developer Guide. - *
- */ - LatestNotificationError?: string; - - /** - *Specifies the date and time that CloudTrail last delivered log files to an account's Amazon S3 bucket.
- */ - LatestDeliveryTime?: Date; - - /** - *Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files - * to the designated bucket. For more information see the topic Error - * Responses in the Amazon S3 API Reference.
- *This error occurs only when there is a problem with the destination S3 bucket and will
- * not occur for timeouts. To resolve the issue, create a new bucket and call
- * UpdateTrail
to specify the new bucket, or fix the existing objects so that
- * CloudTrail can again write to the bucket.
Specifies the most recent date and time when CloudTrail stopped recording API calls for an AWS account.
*/ - LatestDeliveryError?: string; + StopLoggingTime?: Date; /** *This field is no longer in use.
*/ - LatestNotificationAttemptSucceeded?: string; + LatestDeliveryAttemptSucceeded?: string; /** *Displays the most recent date and time when CloudTrail delivered logs to CloudWatch Logs.
@@ -1467,54 +1480,69 @@ export interface GetTrailStatusResponse { LatestCloudWatchLogsDeliveryTime?: Date; /** - *This field is no longer in use.
+ *Displays any CloudWatch Logs error that CloudTrail encountered when attempting to deliver logs to CloudWatch Logs.
*/ - TimeLoggingStopped?: string; + LatestCloudWatchLogsDeliveryError?: string; /** *This field is no longer in use.
*/ LatestDeliveryAttemptTime?: string; + /** + *Specifies the date and time that CloudTrail last delivered a digest file to an account's Amazon S3 bucket.
+ */ + LatestDigestDeliveryTime?: Date; + /** *This field is no longer in use.
*/ - LatestNotificationAttemptTime?: string; + TimeLoggingStopped?: string; /** - *Specifies the most recent date and time when CloudTrail started recording API calls for an AWS account.
+ *Whether the CloudTrail is currently logging AWS API calls.
*/ - StartLoggingTime?: Date; + IsLogging?: boolean; /** *This field is no longer in use.
*/ - TimeLoggingStarted?: string; + LatestNotificationAttemptTime?: string; /** - *Displays any CloudWatch Logs error that CloudTrail encountered when attempting to deliver logs to CloudWatch Logs.
+ *Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files + * to the designated bucket. For more information see the topic Error + * Responses in the Amazon S3 API Reference.
+ *This error occurs only when there is a problem with the destination S3 bucket and will
+ * not occur for timeouts. To resolve the issue, create a new bucket and call
+ * UpdateTrail
to specify the new bucket, or fix the existing objects so that
+ * CloudTrail can again write to the bucket.
Whether the CloudTrail is currently logging AWS API calls.
+ *This field is no longer in use.
*/ - IsLogging?: boolean; + LatestNotificationAttemptSucceeded?: string; /** - *Specifies the date and time that CloudTrail last delivered a digest file to an account's Amazon S3 bucket.
+ *Specifies the date and time of the most recent Amazon SNS notification that CloudTrail has written a new log file to an account's Amazon S3 bucket.
*/ - LatestDigestDeliveryTime?: Date; + LatestNotificationTime?: Date; /** - *This field is no longer in use.
+ *Specifies the most recent date and time when CloudTrail started recording API calls for an AWS account.
*/ - LatestDeliveryAttemptSucceeded?: string; + StartLoggingTime?: Date; /** - *Specifies the date and time of the most recent Amazon SNS notification that CloudTrail has written a new log file to an account's Amazon S3 bucket.
+ *Displays any Amazon SNS error that CloudTrail encountered when attempting to send a + * notification. For more information about Amazon SNS errors, see the Amazon SNS Developer Guide. + *
*/ - LatestNotificationTime?: Date; + LatestNotificationError?: string; /** *Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest @@ -1530,9 +1558,14 @@ export interface GetTrailStatusResponse { LatestDigestDeliveryError?: string; /** - *
Specifies the most recent date and time when CloudTrail stopped recording API calls for an AWS account.
+ *This field is no longer in use.
*/ - StopLoggingTime?: Date; + TimeLoggingStarted?: string; + + /** + *Specifies the date and time that CloudTrail last delivered log files to an account's Amazon S3 bucket.
+ */ + LatestDeliveryTime?: Date; } export namespace GetTrailStatusResponse { @@ -1587,14 +1620,14 @@ export interface ListPublicKeysRequest { StartTime?: Date; /** - *Reserved for future use.
+ *Optionally specifies, in UTC, the end of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used.
*/ - NextToken?: string; + EndTime?: Date; /** - *Optionally specifies, in UTC, the end of the time range to look up public keys for CloudTrail digest files. If not specified, the current time is used.
+ *Reserved for future use.
*/ - EndTime?: Date; + NextToken?: string; } export namespace ListPublicKeysRequest { @@ -1608,9 +1641,9 @@ export namespace ListPublicKeysRequest { */ export interface PublicKey { /** - *The DER encoded public key value in PKCS#1 format.
+ *The ending time of validity of the public key.
*/ - Value?: Uint8Array; + ValidityEndTime?: Date; /** *The starting time of validity of the public key.
@@ -1618,9 +1651,9 @@ export interface PublicKey { ValidityStartTime?: Date; /** - *The ending time of validity of the public key.
+ *The DER encoded public key value in PKCS#1 format.
*/ - ValidityEndTime?: Date; + Value?: Uint8Array; /** *The fingerprint of the public key.
@@ -1708,14 +1741,14 @@ export namespace ResourceTag { */ export interface ListTagsResponse { /** - *Reserved for future use.
+ *A list of resource tags.
*/ - NextToken?: string; + ResourceTagList?: ResourceTag[]; /** - *A list of resource tags.
+ *Reserved for future use.
*/ - ResourceTagList?: ResourceTag[]; + NextToken?: string; } export namespace ListTagsResponse { @@ -1744,6 +1777,11 @@ export namespace ListTrailsRequest { *Information about a CloudTrail trail, including the trail's name, home region, and Amazon Resource Name (ARN).
*/ export interface TrailInfo { + /** + *The name of a trail.
+ */ + Name?: string; + /** *The AWS region in which a trail was created.
*/ @@ -1753,11 +1791,6 @@ export interface TrailInfo { *The ARN of a trail.
*/ TrailARN?: string; - - /** - *The name of a trail.
- */ - Name?: string; } export namespace TrailInfo { @@ -1767,6 +1800,11 @@ export namespace TrailInfo { } export interface ListTrailsResponse { + /** + *Returns the name, ARN, and home region of trails in the current account.
+ */ + Trails?: TrailInfo[]; + /** *The token to use to get the next page of results after a previous API call. If the token does not appear, * there are no more results to return. The token must be passed in with the same parameters as the previous call. @@ -1774,11 +1812,6 @@ export interface ListTrailsResponse { * NextToken should include those same parameters.
*/ NextToken?: string; - - /** - *Returns the name, ARN, and home region of trails in the current account.
- */ - Trails?: TrailInfo[]; } export namespace ListTrailsResponse { @@ -1900,15 +1933,15 @@ export namespace LookupAttribute { */ export interface LookupEventsRequest { /** - *The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the the original call. - * For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.
+ *Specifies that only events that occur before or at the specified time are returned. If the specified end time is before the specified start time, an error is returned.
*/ - NextToken?: string; + EndTime?: Date; /** - *Contains a list of lookup attributes. Currently the list can contain only one item.
+ *Specifies the event category. If you do not specify an event category, events of the category are not returned in the response. For example,
+ * if you do not specify insight
as the value of EventCategory
, no Insights events are returned.
The number of events to return. Possible values are 1 through 50. The default is 50.
@@ -1916,20 +1949,20 @@ export interface LookupEventsRequest { MaxResults?: number; /** - *Specifies the event category. If you do not specify an event category, events of the category are not returned in the response. For example,
- * if you do not specify insight
as the value of EventCategory
, no Insights events are returned.
Contains a list of lookup attributes. Currently the list can contain only one item.
*/ - EventCategory?: EventCategory | string; + LookupAttributes?: LookupAttribute[]; /** - *Specifies that only events that occur after or at the specified time are returned. If the specified start time is after the specified end time, an error is returned.
+ *The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the the original call. + * For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.
*/ - StartTime?: Date; + NextToken?: string; /** - *Specifies that only events that occur before or at the specified time are returned. If the specified end time is before the specified start time, an error is returned.
+ *Specifies that only events that occur after or at the specified time are returned. If the specified start time is after the specified end time, an error is returned.
*/ - EndTime?: Date; + StartTime?: Date; } export namespace LookupEventsRequest { @@ -1942,11 +1975,6 @@ export namespace LookupEventsRequest { *Specifies the type and name of a resource referenced by an event.
*/ export interface Resource { - /** - *The name of the resource referenced by the event returned. These are user-created names whose values will depend on the environment. For example, the resource name might be "auto-scaling-test-group" for an Auto Scaling Group or "i-1234567" for an EC2 Instance.
- */ - ResourceName?: string; - /** *The type of a resource referenced by the event returned. When the resource type cannot be * determined, null is returned. Some examples of resource types are: Instance for EC2, @@ -1954,6 +1982,11 @@ export interface Resource { * To learn more about how to look up and filter events by the resource types supported for a service, see Filtering CloudTrail Events.
*/ ResourceType?: string; + + /** + *The name of the resource referenced by the event returned. These are user-created names whose values will depend on the environment. For example, the resource name might be "auto-scaling-test-group" for an Auto Scaling Group or "i-1234567" for an EC2 Instance.
+ */ + ResourceName?: string; } export namespace Resource { @@ -1966,21 +1999,6 @@ export namespace Resource { *Contains information about an event that was returned by a lookup request. The result includes a representation of a CloudTrail event.
*/ export interface Event { - /** - *Information about whether the event is a write event or a read event.
- */ - ReadOnly?: string; - - /** - *A list of resources referenced by the event returned.
- */ - Resources?: Resource[]; - - /** - *The name of the event returned.
- */ - EventName?: string; - /** *The AWS access key ID that was used to sign the request. If the request was made * with temporary security credentials, this is the access key ID of the temporary credentials.
@@ -1988,9 +2006,9 @@ export interface Event { AccessKeyId?: string; /** - *The CloudTrail ID of the event returned.
+ *The AWS service that the request was made to.
*/ - EventId?: string; + EventSource?: string; /** *The date and time of the event returned.
@@ -2002,15 +2020,30 @@ export interface Event { */ Username?: string; + /** + *Information about whether the event is a write event or a read event.
+ */ + ReadOnly?: string; + /** *A JSON string that contains a representation of the event returned.
*/ CloudTrailEvent?: string; /** - *The AWS service that the request was made to.
+ *A list of resources referenced by the event returned.
*/ - EventSource?: string; + Resources?: Resource[]; + + /** + *The name of the event returned.
+ */ + EventName?: string; + + /** + *The CloudTrail ID of the event returned.
+ */ + EventId?: string; } export namespace Event { @@ -2023,11 +2056,6 @@ export namespace Event { *Contains a response to a LookupEvents action.
*/ export interface LookupEventsResponse { - /** - *A list of events returned based on the lookup attributes specified and the CloudTrail event. The events list is sorted by time. The most recent event is listed first.
- */ - Events?: Event[]; - /** *The token to use to get the next page of results after a previous API call. If the token does not appear, * there are no more results to return. The token must be passed in with the same parameters as the previous call. @@ -2035,6 +2063,11 @@ export interface LookupEventsResponse { * NextToken should include those same parameters.
*/ NextToken?: string; + + /** + *A list of events returned based on the lookup attributes specified and the CloudTrail event. The events list is sorted by time. The most recent event is listed first.
+ */ + Events?: Event[]; } export namespace LookupEventsResponse { @@ -2080,6 +2113,12 @@ export namespace InvalidEventSelectorsException { } export interface PutEventSelectorsRequest { + AdvancedEventSelectors?: AdvancedEventSelector[]; + /** + *Specifies the settings for your event selectors. You can configure up to five event selectors for a trail.
+ */ + EventSelectors?: EventSelector[]; + /** *Specifies the name of the trail or trail ARN. If you specify a trail name, the * string must meet the following requirements:
@@ -2107,11 +2146,6 @@ export interface PutEventSelectorsRequest { * */ TrailName: string | undefined; - - /** - *Specifies the settings for your event selectors. You can configure up to five event selectors for a trail.
- */ - EventSelectors: EventSelector[] | undefined; } export namespace PutEventSelectorsRequest { @@ -2121,6 +2155,11 @@ export namespace PutEventSelectorsRequest { } export interface PutEventSelectorsResponse { + /** + *Specifies the event selectors configured for your trail.
+ */ + EventSelectors?: EventSelector[]; + /** *Specifies the ARN of the trail that was updated with event selectors. The format of a trail ARN * is:
@@ -2130,10 +2169,7 @@ export interface PutEventSelectorsResponse { */ TrailARN?: string; - /** - *Specifies the event selectors configured for your trail.
- */ - EventSelectors?: EventSelector[]; + AdvancedEventSelectors?: AdvancedEventSelector[]; } export namespace PutEventSelectorsResponse { @@ -2181,14 +2217,14 @@ export namespace PutInsightSelectorsRequest { export interface PutInsightSelectorsResponse { /** - *The Amazon Resource Name (ARN) of a trail for which you want to change or add Insights selectors.
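The AdvancedEventSelectors field added to the PutEventSelectors request and response above follows the Name/FieldSelectors shape handled by the new serializers later in this diff; a hedged sketch, with the trail name and ARN prefix as placeholders:

```ts
import { CloudTrailClient, PutEventSelectorsCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-2" });

export async function putAdvancedSelectors(): Promise<void> {
  const { AdvancedEventSelectors } = await client.send(
    new PutEventSelectorsCommand({
      TrailName: "example-trail", // placeholder
      AdvancedEventSelectors: [
        {
          Name: "Log PutObject data events for one bucket",
          FieldSelectors: [
            { Field: "eventCategory", Equals: ["Data"] },
            { Field: "resources.type", Equals: ["AWS::S3::Object"] },
            { Field: "resources.ARN", StartsWith: ["arn:aws:s3:::example-bucket/"] }, // placeholder
          ],
        },
      ],
    })
  );
  console.log(AdvancedEventSelectors);
}
```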
+ *A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight
is supported as an insight type.
A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight
is supported as an insight type.
The Amazon Resource Name (ARN) of a trail for which you want to change or add Insights selectors.
*/ - InsightSelectors?: InsightSelector[]; + TrailARN?: string; } export namespace PutInsightSelectorsResponse { @@ -2201,11 +2237,6 @@ export namespace PutInsightSelectorsResponse { *Specifies the tags to remove from a trail.
*/ export interface RemoveTagsRequest { - /** - *Specifies a list of tags to be removed.
- */ - TagsList?: Tag[]; - /** *Specifies the ARN of the trail from which tags should be removed. The format of a trail ARN is:
*@@ -2213,6 +2244,11 @@ export interface RemoveTagsRequest { *
*/ ResourceId: string | undefined; + + /** + *Specifies a list of tags to be removed.
+ */ + TagsList?: Tag[]; } export namespace RemoveTagsRequest { @@ -2297,59 +2333,36 @@ export namespace StopLoggingResponse { */ export interface UpdateTrailRequest { /** - *Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
+ *Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.
*/ - CloudWatchLogsRoleArn?: string; + CloudWatchLogsLogGroupArn?: string; /** - *Specifies the name of the trail or trail ARN. If Name
is a trail name, the
- * string must meet the following requirements:
Specifies whether the trail is publishing events from global services such as IAM to the log files.
+ */ + IncludeGlobalServiceEvents?: boolean; + + /** + *Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The + * value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully + * specified ARN to a key, or a globally unique identifier.
+ *Examples:
*Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)
- *Start with a letter or number, and end with a letter or number
+ *alias/MyAliasName
*Be between 3 and 128 characters
+ *arn:aws:kms:us-east-2:123456789012:alias/MyAliasName
*Have no adjacent periods, underscores or dashes. Names like my-_namespace
- * and my--namespace
are invalid.
arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012
*Not be in IP address format (for example, 192.168.5.4)
+ *12345678-1234-1234-1234-123456789012
*If Name
is a trail ARN, it must be in the format:
- * arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
- *
Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.
*/ - S3BucketName?: string; - - /** - *Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, - * shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region - * where it was created, and its shadow trails in other regions will be deleted. As a best practice, consider - * using trails that log events in all regions.
- */ - IsMultiRegionTrail?: boolean; - - /** - *Specifies whether the trail is applied to all accounts in an organization in AWS Organizations, or only for the current AWS account. - * The default is false, and cannot be true unless the call is made on behalf of an AWS account that is the master account for an organization in - * AWS Organizations. If the trail is not an organization trail and this is set to true, the trail will be created in all AWS accounts that belong - * to the organization. If the trail is an organization trail and this is set to false, the trail will remain in the current AWS account but be - * deleted from all member accounts in the organization.
- */ - IsOrganizationTrail?: boolean; + KmsKeyId?: string; /** *Specifies whether log file validation is enabled. The default is false.
@@ -2360,47 +2373,70 @@ export interface UpdateTrailRequest { EnableLogFileValidation?: boolean; /** - *Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.
+ *Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.
*/ - CloudWatchLogsLogGroupArn?: string; + SnsTopicName?: string; /** - *Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.
+ *Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.
*/ - SnsTopicName?: string; + S3BucketName?: string; /** - *Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated - * for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
+ *Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
*/ - S3KeyPrefix?: string; + CloudWatchLogsRoleArn?: string; /** - *Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The - * value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully - * specified ARN to a key, or a globally unique identifier.
- *Examples:
+ *Specifies the name of the trail or trail ARN. If Name
is a trail name, the
+ * string must meet the following requirements:
alias/MyAliasName
+ *Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)
*arn:aws:kms:us-east-2:123456789012:alias/MyAliasName
+ *Start with a letter or number, and end with a letter or number
*arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012
+ *Be between 3 and 128 characters
*12345678-1234-1234-1234-123456789012
+ *Have no adjacent periods, underscores or dashes. Names like my-_namespace
+ * and my--namespace
are invalid.
Not be in IP address format (for example, 192.168.5.4)
*If Name
is a trail ARN, it must be in the format:
+ * arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
+ *
Specifies whether the trail is publishing events from global services such as IAM to the log files.
+ *Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated + * for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.
*/ - IncludeGlobalServiceEvents?: boolean; + S3KeyPrefix?: string; + + /** + *Specifies whether the trail is applied to all accounts in an organization in AWS Organizations, or only for the current AWS account. + * The default is false, and cannot be true unless the call is made on behalf of an AWS account that is the master account for an organization in + * AWS Organizations. If the trail is not an organization trail and this is set to true, the trail will be created in all AWS accounts that belong + * to the organization. If the trail is an organization trail and this is set to false, the trail will remain in the current AWS account but be + * deleted from all member accounts in the organization.
+ */ + IsOrganizationTrail?: boolean; + + /** + *Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, + * shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region + * where it was created, and its shadow trails in other regions will be deleted. As a best practice, consider + * using trails that log events in all regions.
+ */ + IsMultiRegionTrail?: boolean; } export namespace UpdateTrailRequest { @@ -2414,18 +2450,14 @@ export namespace UpdateTrailRequest { */ export interface UpdateTrailResponse { /** - *This field is no longer in use. Use SnsTopicARN.
+ *Specifies whether the trail is an organization trail.
*/ - SnsTopicName?: string; + IsOrganizationTrail?: boolean; /** - *Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:
- * - *
- * arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012
- *
Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
*/ - KmsKeyId?: string; + CloudWatchLogsRoleArn?: string; /** *Specifies the name of the trail.
@@ -2433,39 +2465,37 @@ export interface UpdateTrailResponse { Name?: string; /** - *Specifies whether the trail is an organization trail.
- */ - IsOrganizationTrail?: boolean; - - /** - *Specifies whether the trail is publishing events from global services such as IAM to the log files.
+ *This field is no longer in use. Use SnsTopicARN.
*/ - IncludeGlobalServiceEvents?: boolean; + SnsTopicName?: string; /** - *Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.
+ *Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. The value is a fully specified ARN to a KMS key in the format:
+ * + *
+ * arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012
+ *
Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated - * for log file delivery. For more information, see Finding Your CloudTrail Log Files.
+ *Specifies whether log file integrity validation is enabled.
*/ - S3KeyPrefix?: string; + LogFileValidationEnabled?: boolean; /** - *Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications - * when log files are delivered. The format of a topic ARN is:
+ *Specifies the ARN of the trail that was updated. The format of a trail ARN + * is:
*
- * arn:aws:sns:us-east-2:123456789012:MyTopic
+ * arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
*
Specifies whether log file integrity validation is enabled.
+ *Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.
*/ - LogFileValidationEnabled?: boolean; + CloudWatchLogsLogGroupArn?: string; /** *Specifies whether the trail exists in one region or in all regions.
@@ -2478,18 +2508,24 @@ export interface UpdateTrailResponse { S3BucketName?: string; /** - *Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.
+ *Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications + * when log files are delivered. The format of a topic ARN is:
+ *
+ * arn:aws:sns:us-east-2:123456789012:MyTopic
+ *
Specifies the ARN of the trail that was updated. The format of a trail ARN - * is:
- *
- * arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail
- *
Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated + * for log file delivery. For more information, see Finding Your CloudTrail Log Files.
*/ - TrailARN?: string; + S3KeyPrefix?: string; + + /** + *Specifies whether the trail is publishing events from global services such as IAM to the log files.
+ */ + IncludeGlobalServiceEvents?: boolean; } export namespace UpdateTrailResponse { diff --git a/clients/client-cloudtrail/protocols/Aws_json1_1.ts b/clients/client-cloudtrail/protocols/Aws_json1_1.ts index 345133c0dfcc..c0c58e84c2ae 100644 --- a/clients/client-cloudtrail/protocols/Aws_json1_1.ts +++ b/clients/client-cloudtrail/protocols/Aws_json1_1.ts @@ -25,6 +25,8 @@ import { UpdateTrailCommandInput, UpdateTrailCommandOutput } from "../commands/U import { AddTagsRequest, AddTagsResponse, + AdvancedEventSelector, + AdvancedFieldSelector, CloudTrailARNInvalidException, CloudTrailAccessNotEnabledException, CloudWatchLogsDeliveryUnavailableException, @@ -3050,6 +3052,37 @@ const serializeAws_json1_1AddTagsRequest = (input: AddTagsRequest, context: __Se }; }; +const serializeAws_json1_1AdvancedEventSelector = (input: AdvancedEventSelector, context: __SerdeContext): any => { + return { + ...(input.FieldSelectors !== undefined && { + FieldSelectors: serializeAws_json1_1AdvancedFieldSelectors(input.FieldSelectors, context), + }), + ...(input.Name !== undefined && { Name: input.Name }), + }; +}; + +const serializeAws_json1_1AdvancedEventSelectors = (input: AdvancedEventSelector[], context: __SerdeContext): any => { + return input.map((entry) => serializeAws_json1_1AdvancedEventSelector(entry, context)); +}; + +const serializeAws_json1_1AdvancedFieldSelector = (input: AdvancedFieldSelector, context: __SerdeContext): any => { + return { + ...(input.EndsWith !== undefined && { EndsWith: serializeAws_json1_1Operator(input.EndsWith, context) }), + ...(input.Equals !== undefined && { Equals: serializeAws_json1_1Operator(input.Equals, context) }), + ...(input.Field !== undefined && { Field: input.Field }), + ...(input.NotEndsWith !== undefined && { NotEndsWith: serializeAws_json1_1Operator(input.NotEndsWith, context) }), + ...(input.NotEquals !== undefined && { NotEquals: serializeAws_json1_1Operator(input.NotEquals, context) }), + ...(input.NotStartsWith !== undefined && { + NotStartsWith: serializeAws_json1_1Operator(input.NotStartsWith, context), + }), + ...(input.StartsWith !== undefined && { StartsWith: serializeAws_json1_1Operator(input.StartsWith, context) }), + }; +}; + +const serializeAws_json1_1AdvancedFieldSelectors = (input: AdvancedFieldSelector[], context: __SerdeContext): any => { + return input.map((entry) => serializeAws_json1_1AdvancedFieldSelector(entry, context)); +}; + const serializeAws_json1_1CreateTrailRequest = (input: CreateTrailRequest, context: __SerdeContext): any => { return { ...(input.CloudWatchLogsLogGroupArn !== undefined && { @@ -3212,11 +3245,18 @@ const serializeAws_json1_1LookupEventsRequest = (input: LookupEventsRequest, con }; }; +const serializeAws_json1_1Operator = (input: string[], context: __SerdeContext): any => { + return input.map((entry) => entry); +}; + const serializeAws_json1_1PutEventSelectorsRequest = ( input: PutEventSelectorsRequest, context: __SerdeContext ): any => { return { + ...(input.AdvancedEventSelectors !== undefined && { + AdvancedEventSelectors: serializeAws_json1_1AdvancedEventSelectors(input.AdvancedEventSelectors, context), + }), ...(input.EventSelectors !== undefined && { EventSelectors: serializeAws_json1_1EventSelectors(input.EventSelectors, context), }), @@ -3298,6 +3338,60 @@ const deserializeAws_json1_1AddTagsResponse = (output: any, context: __SerdeCont return {} as any; }; +const deserializeAws_json1_1AdvancedEventSelector = (output: any, context: __SerdeContext): AdvancedEventSelector => { + return { + FieldSelectors: + 
output.FieldSelectors !== undefined && output.FieldSelectors !== null + ? deserializeAws_json1_1AdvancedFieldSelectors(output.FieldSelectors, context) + : undefined, + Name: output.Name !== undefined && output.Name !== null ? output.Name : undefined, + } as any; +}; + +const deserializeAws_json1_1AdvancedEventSelectors = ( + output: any, + context: __SerdeContext +): AdvancedEventSelector[] => { + return (output || []).map((entry: any) => deserializeAws_json1_1AdvancedEventSelector(entry, context)); +}; + +const deserializeAws_json1_1AdvancedFieldSelector = (output: any, context: __SerdeContext): AdvancedFieldSelector => { + return { + EndsWith: + output.EndsWith !== undefined && output.EndsWith !== null + ? deserializeAws_json1_1Operator(output.EndsWith, context) + : undefined, + Equals: + output.Equals !== undefined && output.Equals !== null + ? deserializeAws_json1_1Operator(output.Equals, context) + : undefined, + Field: output.Field !== undefined && output.Field !== null ? output.Field : undefined, + NotEndsWith: + output.NotEndsWith !== undefined && output.NotEndsWith !== null + ? deserializeAws_json1_1Operator(output.NotEndsWith, context) + : undefined, + NotEquals: + output.NotEquals !== undefined && output.NotEquals !== null + ? deserializeAws_json1_1Operator(output.NotEquals, context) + : undefined, + NotStartsWith: + output.NotStartsWith !== undefined && output.NotStartsWith !== null + ? deserializeAws_json1_1Operator(output.NotStartsWith, context) + : undefined, + StartsWith: + output.StartsWith !== undefined && output.StartsWith !== null + ? deserializeAws_json1_1Operator(output.StartsWith, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1AdvancedFieldSelectors = ( + output: any, + context: __SerdeContext +): AdvancedFieldSelector[] => { + return (output || []).map((entry: any) => deserializeAws_json1_1AdvancedFieldSelector(entry, context)); +}; + const deserializeAws_json1_1CloudTrailAccessNotEnabledException = ( output: any, context: __SerdeContext @@ -3449,6 +3543,10 @@ const deserializeAws_json1_1GetEventSelectorsResponse = ( context: __SerdeContext ): GetEventSelectorsResponse => { return { + AdvancedEventSelectors: + output.AdvancedEventSelectors !== undefined && output.AdvancedEventSelectors !== null + ? deserializeAws_json1_1AdvancedEventSelectors(output.AdvancedEventSelectors, context) + : undefined, EventSelectors: output.EventSelectors !== undefined && output.EventSelectors !== null ? deserializeAws_json1_1EventSelectors(output.EventSelectors, context) @@ -3854,6 +3952,10 @@ const deserializeAws_json1_1OperationNotPermittedException = ( } as any; }; +const deserializeAws_json1_1Operator = (output: any, context: __SerdeContext): string[] => { + return (output || []).map((entry: any) => entry); +}; + const deserializeAws_json1_1OrganizationNotInAllFeaturesModeException = ( output: any, context: __SerdeContext @@ -3896,6 +3998,10 @@ const deserializeAws_json1_1PutEventSelectorsResponse = ( context: __SerdeContext ): PutEventSelectorsResponse => { return { + AdvancedEventSelectors: + output.AdvancedEventSelectors !== undefined && output.AdvancedEventSelectors !== null + ? deserializeAws_json1_1AdvancedEventSelectors(output.AdvancedEventSelectors, context) + : undefined, EventSelectors: output.EventSelectors !== undefined && output.EventSelectors !== null ? 
deserializeAws_json1_1EventSelectors(output.EventSelectors, context) diff --git a/clients/client-codeartifact/Codeartifact.ts b/clients/client-codeartifact/Codeartifact.ts index 9cffdde1eb44..99adb1f7315d 100644 --- a/clients/client-codeartifact/Codeartifact.ts +++ b/clients/client-codeartifact/Codeartifact.ts @@ -165,7 +165,7 @@ import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; /** * AWS CodeArtifact is a fully managed artifact repository compatible with language-native
- * package managers and build tools such as npm, Apache Maven, and pip. You can use CodeArtifact to
+ * package managers and build tools such as npm, Apache Maven, NuGet, and pip. You can use CodeArtifact to
* share packages with development teams and pull packages. Packages can be pulled from both
* public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact
* repository and another repository, which effectively merges their contents from the point of
@@ -185,7 +185,10 @@ import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types";
* repository exposes endpoints for fetching and publishing packages using tools like the
*
*
* Package: A package is a bundle of software and the metadata required to
- * resolve dependencies and install the software. CodeArtifact supports npm, PyPI, and Maven package formats.npm
- * CLI, the Maven CLI (
+ * CLI, the
+ * NuGet
+ * CLI,
+ * the Maven CLI (
* mvn
* ), and
* pip
@@ -211,7 +214,7 @@ import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types";
*
In CodeArtifact, a package consists of:
*maven
*
*
+ * nuget
+ *
+ * ListTagsForResource
: Returns a list of the tags associated with a resource.
* PutDomainPermissionsPolicy
: Attaches a resource policy to a domain.
+ * TagResource
: Adds or updates tags for a resource.
+ * UntagResource
: Removes a tag from a resource.
* UpdatePackageVersionsStatus
: Updates the status of one or more versions of a package.
maven
*
*
+ * nuget
+ *
AWS CodeArtifact is a fully managed artifact repository compatible with language-native
- * package managers and build tools such as npm, Apache Maven, and pip. You can use CodeArtifact to
+ * package managers and build tools such as npm, Apache Maven, NuGet, and pip. You can use CodeArtifact to
* share packages with development teams and pull packages. Packages can be pulled from both
* public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact
* repository and another repository, which effectively merges their contents from the point of
@@ -353,7 +353,10 @@ export type CodeartifactClientResolvedConfig = __SmithyResolvedConfiguration<__H
* repository exposes endpoints for fetching and publishing packages using tools like the
*
*
* Package: A package is a bundle of software and the metadata required to
- * resolve dependencies and install the software. CodeArtifact supports npm, PyPI, and Maven package formats.npm
- * CLI, the Maven CLI (
+ * CLI, the
+ * NuGet
+ * CLI,
+ * the Maven CLI (
* mvn
* ), and
* pip
@@ -379,7 +382,7 @@ export type CodeartifactClientResolvedConfig = __SmithyResolvedConfiguration<__H
*
In CodeArtifact, a package consists of:
*maven
*
*
+ * nuget
+ *
+ * ListTagsForResource
: Returns a list of the tags associated with a resource.
* PutDomainPermissionsPolicy
: Attaches a resource policy to a domain.
+ * TagResource
: Adds or updates tags for a resource.
+ * UntagResource
: Removes a tag from a resource.
* UpdatePackageVersionsStatus
: Updates the status of one or more versions of a package.
maven
*
*
+ * nuget
+ *
- * The size of the asset. - *
+ * The name of the asset. + * */ - size?: number; + name: string | undefined; /** *- * The name of the asset. - *
+ * The size of the asset. + * */ - name: string | undefined; + size?: number; /** *@@ -61,6 +61,26 @@ export namespace AssetSummary { } export interface AssociateExternalConnectionRequest { + /** + *
The name of the domain that contains the repository.
+ */ + domain: string | undefined; + + /** + *+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + *
+ */ + domainOwner?: string; + + /** + *+ * The name of the repository to which the external connection is added. + *
+ */ + repository: string | undefined; + /** *
* The name of the external connection to add to the repository. The following values are supported:
@@ -96,29 +116,14 @@ export interface AssociateExternalConnectionRequest {
* public:maven-commonsware
- for the CommonsWare Android repository.
*
+ * public:nuget-org
- for the NuGet Gallery.
+ *
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; - - /** - *The name of the domain that contains the repository.
- */ - domain: string | undefined; - - /** - *- * The name of the repository to which the external connection is added. - *
- */ - repository: string | undefined; } export namespace AssociateExternalConnectionRequest { @@ -130,6 +135,7 @@ export namespace AssociateExternalConnectionRequest { export enum PackageFormat { MAVEN = "maven", NPM = "npm", + NUGET = "nuget", PYPI = "pypi", } @@ -143,6 +149,11 @@ export enum ExternalConnectionStatus { * */ export interface RepositoryExternalConnectionInfo { + /** + *The name of the external connection associated with a repository.
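With nuget added to PackageFormat and public:nuget-org to the supported external connections, a repository can now be connected to the NuGet Gallery; a sketch using the request shape above, with placeholder domain and repository names:

```ts
import { CodeartifactClient, AssociateExternalConnectionCommand } from "@aws-sdk/client-codeartifact";

const client = new CodeartifactClient({ region: "us-west-2" });

export async function connectToNuGetGallery(): Promise<void> {
  const { repository } = await client.send(
    new AssociateExternalConnectionCommand({
      domain: "example-domain",   // placeholder
      repository: "example-repo", // placeholder
      externalConnection: "public:nuget-org",
    })
  );
  console.log(repository?.externalConnections);
}
```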
+ */ + externalConnectionName?: string; + /** *
* The package format associated with a repository's external connection. The valid package formats are:
@@ -163,15 +174,15 @@ export interface RepositoryExternalConnectionInfo {
* maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
*
+ * nuget
: A NuGet package.
+ *
The name of the external connection associated with a repository.
- */ - externalConnectionName?: string; - /** *
* The status of the external connection of a repository. There is one valid value, Available
.
@@ -215,17 +226,15 @@ export namespace UpstreamRepositoryInfo {
export interface RepositoryDescription {
/**
*
- * A text description of the repository. - *
+ * The name of the repository. + * */ - description?: string; + name?: string; /** - *A list of upstream repositories to associate with the repository. The order of the upstream repositories - * in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more - * information, see Working with upstream repositories.
+ *The 12-digit account number of the AWS account that manages the repository.
*/ - upstreams?: UpstreamRepositoryInfo[]; + administratorAccount?: string; /** *@@ -243,28 +252,30 @@ export interface RepositoryDescription { domainOwner?: string; /** - *
- * An array of external connections associated with the repository. - *
+ *The Amazon Resource Name (ARN) of the repository.
*/ - externalConnections?: RepositoryExternalConnectionInfo[]; + arn?: string; /** - *The 12-digit account number of the AWS account that manages the repository.
+ *+ * A text description of the repository. + *
*/ - administratorAccount?: string; + description?: string; /** - *The Amazon Resource Name (ARN) of the repository.
+ *A list of upstream repositories to associate with the repository. The order of the upstream repositories + * in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more + * information, see Working with upstream repositories.
*/ - arn?: string; + upstreams?: UpstreamRepositoryInfo[]; /** *- * The name of the repository. - *
+ * An array of external connections associated with the repository. + * */ - name?: string; + externalConnections?: RepositoryExternalConnectionInfo[]; } export namespace RepositoryDescription { @@ -304,20 +315,20 @@ export enum ResourceType { export interface ConflictException extends __SmithyException, $MetadataBearer { name: "ConflictException"; $fault: "client"; + message: string | undefined; /** *- * The type of AWS resource. + * The ID of the resource. *
*/ - resourceType?: ResourceType | string; + resourceId?: string; - message: string | undefined; /** *- * The ID of the resource. + * The type of AWS resource. *
*/ - resourceId?: string; + resourceType?: ResourceType | string; } export namespace ConflictException { @@ -349,6 +360,7 @@ export namespace InternalServerException { export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { name: "ResourceNotFoundException"; $fault: "client"; + message: string | undefined; /** ** The ID of the resource. @@ -356,7 +368,6 @@ export interface ResourceNotFoundException extends __SmithyException, $MetadataB */ resourceId?: string; - message: string | undefined; /** *
* The type of AWS resource. @@ -379,6 +390,7 @@ export namespace ResourceNotFoundException { export interface ServiceQuotaExceededException extends __SmithyException, $MetadataBearer { name: "ServiceQuotaExceededException"; $fault: "client"; + message: string | undefined; /** *
* The ID of the resource. @@ -386,7 +398,6 @@ export interface ServiceQuotaExceededException extends __SmithyException, $Metad */ resourceId?: string; - message: string | undefined; /** *
* The type of AWS resource. @@ -440,14 +451,13 @@ export enum ValidationExceptionReason { export interface ValidationException extends __SmithyException, $MetadataBearer { name: "ValidationException"; $fault: "client"; + message: string | undefined; /** *
* *
*/ reason?: ValidationExceptionReason | string; - - message: string | undefined; } export namespace ValidationException { @@ -457,13 +467,6 @@ export namespace ValidationException { } export interface CopyPackageVersionsRequest { - /** - *- * The name of the repository that contains the package versions to copy. - *
- */ - sourceRepository: string | undefined; - /** ** The name of the domain that contains the source and destination repositories. @@ -471,42 +474,27 @@ export interface CopyPackageVersionsRequest { */ domain: string | undefined; - /** - *
Set to true to copy packages from repositories that are upstream from the source - * repository to the destination repository. The default setting is false. For more information, - * see Working with - * upstream repositories.
- */ - includeFromUpstream?: boolean; - /** *- * The name of the package that is copied. - *
+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + * */ - package: string | undefined; + domainOwner?: string; /** *- * The versions of the package to copy. - *
- *
- * You must specify versions
or versionRevisions
. You cannot specify both.
- *
- * Set to true to overwrite a package version that already exists in the destination repository.
- * If set to false and the package version already exists in the destination repository,
- * the package version is returned in the failedVersions
field of the response with
- * an ALREADY_EXISTS
error code.
- *
@@ -528,31 +516,15 @@ export interface CopyPackageVersionsRequest {
* maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
*
+ * nuget
: A NuGet package.
+ *
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; - - /** - *
- * A list of key-value pairs. The keys are package versions and the values are package version revisions. A CopyPackageVersion
operation
- * succeeds if the specified versions in the source repository match the specified package version revision.
- *
- * You must specify versions
or versionRevisions
. You cannot specify both.
- *
* The namespace of the package. The package component that specifies its @@ -575,16 +547,65 @@ export interface CopyPackageVersionsRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *- * The name of the repository into which package versions are copied. + * The name of the package that is copied. *
*/ - destinationRepository: string | undefined; + package: string | undefined; + + /** + *+ * The versions of the package to copy. + *
+ *
+ * You must specify versions
or versionRevisions
. You cannot specify both.
+ *
+ * A list of key-value pairs. The keys are package versions and the values are package version revisions. A CopyPackageVersion
operation
+ * succeeds if the specified versions in the source repository match the specified package version revision.
+ *
+ * You must specify versions
or versionRevisions
. You cannot specify both.
+ *
+ * Set to true to overwrite a package version that already exists in the destination repository.
+ * If set to false and the package version already exists in the destination repository,
+ * the package version is returned in the failedVersions
field of the response with
+ * an ALREADY_EXISTS
error code.
+ *
Set to true to copy packages from repositories that are upstream from the source + * repository to the destination repository. The default setting is false. For more information, + * see Working with + * upstream repositories.
+ */ + includeFromUpstream?: boolean; } export namespace CopyPackageVersionsRequest { @@ -608,13 +629,6 @@ export enum PackageVersionErrorCode { * */ export interface PackageVersionError { - /** - *- * The error message associated with the error. - *
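A minimal usage sketch for the CopyPackageVersions request shape reordered above, assuming the CopyPackageVersionsCommand and CodeartifactClient that this package generates from these models; the region, domain, repository, and package names are placeholders.

import { CodeartifactClient, CopyPackageVersionsCommand } from "@aws-sdk/client-codeartifact";

const client = new CodeartifactClient({ region: "us-east-1" }); // placeholder region

// Copy one npm package version between repositories in the same domain.
// Per the docs above, specify either `versions` or `versionRevisions`, never both.
const result = await client.send(
  new CopyPackageVersionsCommand({
    domain: "my-domain",
    sourceRepository: "upstream-repo",
    destinationRepository: "team-repo",
    format: "npm",
    package: "my-package",
    versions: ["1.0.0"],
    allowOverwrite: false, // existing versions come back in failedVersions with ALREADY_EXISTS
  })
);
console.log(result.successfulVersions, result.failedVersions);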
- */ - errorMessage?: string; - /** *The error code associated with the error. Valid error codes are:
*+ * The error message associated with the error. + *
+ */ + errorMessage?: string; } export namespace PackageVersionError { @@ -782,12 +803,12 @@ export namespace CopyPackageVersionsResult { */ export interface Tag { /** - *The tag's key.
+ *The tag key.
*/ key: string | undefined; /** - *The tag's value.
+ *The tag value.
*/ value: string | undefined; } @@ -846,24 +867,12 @@ export enum DomainStatus { * */ export interface DomainDescription { - /** - *The Amazon Resource Name (ARN) of the domain.
- */ - arn?: string; - - /** - *- * A timestamp that represents the date and time the domain was created. - *
- */ - createdTime?: Date; - /** *- * The total size of all assets in the domain. - *
+ * The name of the domain. + * */ - assetSizeBytes?: number; + name?: string; /** *The AWS account ID that owns the domain.
@@ -871,21 +880,9 @@ export interface DomainDescription { owner?: string; /** - *The ARN of an AWS Key Management Service (AWS KMS) key associated with a domain.
- */ - encryptionKey?: string; - - /** - *The Amazon Resource Name (ARN) of the Amazon S3 bucket that is used to store package assets in the domain.
- */ - s3BucketArn?: string; - - /** - *- * The number of repositories in the domain. - *
+ *The Amazon Resource Name (ARN) of the domain.
*/ - repositoryCount?: number; + arn?: string; /** *The current status of a domain. The valid values are
@@ -906,10 +903,34 @@ export interface DomainDescription { /** *- * The name of the domain. - *
+ * A timestamp that represents the date and time the domain was created. + * */ - name?: string; + createdTime?: Date; + + /** + *The ARN of an AWS Key Management Service (AWS KMS) key associated with a domain.
+ */ + encryptionKey?: string; + + /** + *+ * The number of repositories in the domain. + *
+ */ + repositoryCount?: number; + + /** + *+ * The total size of all assets in the domain. + *
+ */ + assetSizeBytes?: number; + + /** + *The Amazon Resource Name (ARN) of the Amazon S3 bucket that is used to store package assets in the domain.
+ */ + s3BucketArn?: string; } export namespace DomainDescription { @@ -959,14 +980,11 @@ export namespace UpstreamRepository { export interface CreateRepositoryRequest { /** - *The name of the repository to create.
- */ - repository: string | undefined; - - /** - *One or more tag key-value pairs for the repository.
+ *+ * The domain that contains the created repository. + *
*/ - tags?: Tag[]; + domain: string | undefined; /** *@@ -976,12 +994,17 @@ export interface CreateRepositoryRequest { */ domainOwner?: string; + /** + *
The name of the repository to create.
+ */ + repository: string | undefined; + /** *- * The domain that contains the created repository. - *
+ * A description of the created repository. + * */ - domain: string | undefined; + description?: string; /** *A list of upstream repositories to associate with the repository. The order of the upstream repositories @@ -991,11 +1014,9 @@ export interface CreateRepositoryRequest { upstreams?: UpstreamRepository[]; /** - *
- * A description of the created repository. - *
+ *One or more tag key-value pairs for the repository.
*/ - description?: string; + tags?: Tag[]; } export namespace CreateRepositoryRequest { @@ -1022,18 +1043,18 @@ export namespace CreateRepositoryResult { export interface DeleteDomainRequest { /** *- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
+ * The name of the domain to delete. + * */ - domainOwner?: string; + domain: string | undefined; /** *- * The name of the domain to delete. - *
+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + * */ - domain: string | undefined; + domainOwner?: string; } export namespace DeleteDomainRequest { @@ -1060,11 +1081,10 @@ export namespace DeleteDomainResult { export interface DeleteDomainPermissionsPolicyRequest { /** *- * The current revision of the resource policy to be deleted. This revision is used for optimistic locking, which - * prevents others from overwriting your changes to the domain's resource policy. + * The name of the domain associated with the resource policy to be deleted. *
*/ - policyRevision?: string; + domain: string | undefined; /** *@@ -1076,10 +1096,11 @@ export interface DeleteDomainPermissionsPolicyRequest { /** *
- * The name of the domain associated with the resource policy to be deleted. + * The current revision of the resource policy to be deleted. This revision is used for optimistic locking, which + * prevents others from overwriting your changes to the domain's resource policy. *
*/ - domain: string | undefined; + policyRevision?: string; } export namespace DeleteDomainPermissionsPolicyRequest { @@ -1096,17 +1117,17 @@ export namespace DeleteDomainPermissionsPolicyRequest { export interface ResourcePolicy { /** *- * The current revision of the resource policy. + * The ARN of the resource associated with the resource policy *
*/ - revision?: string; + resourceArn?: string; /** *- * The ARN of the resource associated with the resource policy + * The current revision of the resource policy. *
*/ - resourceArn?: string; + revision?: string; /** *@@ -1140,17 +1161,18 @@ export namespace DeleteDomainPermissionsPolicyResult { export interface DeletePackageVersionsRequest { /** *
- * An array of strings that specify the versions of the package to delete. + * The name of the domain that contains the package to delete. *
*/ - versions: string[] | undefined; + domain: string | undefined; /** *- * The name of the package with the versions to delete. + * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. *
*/ - package: string | undefined; + domainOwner?: string; /** *@@ -1161,102 +1183,112 @@ export interface DeletePackageVersionsRequest { /** *
- * The namespace of the package. The package component that specifies its - * namespace depends on its type. For example: - *
+ * The format of the package versions to delete. The valid values are: + * *
- * The namespace of a Maven package is its groupId
.
- *
npm
+ *
*
- * The namespace of an npm package is its scope
.
- *
pypi
+ *
* - * A Python package does not contain a corresponding component, so - * Python packages do not have a namespace. - *
+ *maven
+ *
+ *
+ * nuget
+ *
- * The expected status of the package version to delete. Valid values are: - *
+ * The namespace of the package. The package component that specifies its + * namespace depends on its type. For example: + * *
- * Published
- *
- * Unfinished
- *
groupId
.
+ *
*
- * Unlisted
- *
scope
.
+ *
*
- * Archived
- *
- * Disposed
- *
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. + * The name of the package with the versions to delete. *
*/ - domainOwner?: string; + package: string | undefined; /** *- * The name of the domain that contains the package to delete. + * An array of strings that specify the versions of the package to delete. *
*/ - domain: string | undefined; + versions: string[] | undefined; /** *- * The format of the package versions to delete. The valid values are: + * The expected status of the package version to delete. Valid values are: *
*
- * npm
+ * Published
*
- * pypi
+ * Unfinished
*
- * maven
+ * Unlisted
+ *
+ * Archived
+ *
+ * Disposed
*
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
+ * The name of the domain that contains the repository to delete. + * */ - domainOwner?: string; + domain: string | undefined; /** *- * The name of the domain that contains the repository to delete. - *
+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + * */ - domain: string | undefined; + domainOwner?: string; /** *The name of the repository to delete.
@@ -1366,33 +1398,33 @@ export namespace DeleteRepositoryResult { export interface DeleteRepositoryPermissionsPolicyRequest { /** *- * The name of the repository that is associated with the resource policy to be deleted - *
+ * The name of the domain that contains the repository associated with the resource policy to be deleted. + * */ - repository: string | undefined; + domain: string | undefined; /** *- * The revision of the repository's resource policy to be deleted. This revision is used for optimistic locking, which - * prevents others from accidentally overwriting your changes to the repository's resource policy. - *
+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + * */ - policyRevision?: string; + domainOwner?: string; /** *- * The name of the domain that contains the repository associated with the resource policy to be deleted. - *
+ * The name of the repository that is associated with the resource policy to be deleted + * */ - domain: string | undefined; + repository: string | undefined; /** *- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
+ * The revision of the repository's resource policy to be deleted. This revision is used for optimistic locking, which + * prevents others from accidentally overwriting your changes to the repository's resource policy. + * */ - domainOwner?: string; + policyRevision?: string; } export namespace DeleteRepositoryPermissionsPolicyRequest { @@ -1419,18 +1451,18 @@ export namespace DeleteRepositoryPermissionsPolicyResult { export interface DescribeDomainRequest { /** *- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
+ * A string that specifies the name of the requested domain. + * */ - domainOwner?: string; + domain: string | undefined; /** *- * A string that specifies the name of the requested domain. - *
+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + * */ - domain: string | undefined; + domainOwner?: string; } export namespace DescribeDomainRequest { @@ -1465,15 +1497,16 @@ export interface DescribePackageVersionRequest { /** *
- * A string that contains the package version (for example, 3.5.2
).
- *
The name of the requested package version.
+ *The name of the repository that contains the package version.
*/ - package: string | undefined; + repository: string | undefined; /** *
@@ -1495,18 +1528,15 @@ export interface DescribePackageVersionRequest {
* maven
*
+ * nuget
+ *
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; - /** ** The namespace of the package. The package component that specifies its @@ -1529,14 +1559,27 @@ export interface DescribePackageVersionRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *The name of the repository that contains the package version.
+ *The name of the requested package version.
*/ - repository: string | undefined; + package: string | undefined; + + /** + *
+ * A string that contains the package version (for example, 3.5.2
).
+ *
- * The URL for license data. + * Name of the license. *
*/ - url?: string; + name?: string; /** *- * Name of the license. + * The URL for license data. *
*/ - name?: string; + url?: string; } export namespace LicenseInfo { @@ -1578,45 +1621,6 @@ export namespace LicenseInfo { * */ export interface PackageVersionDescription { - /** - *- * A timestamp that contains the date and time the package version was published. - *
- */ - publishedTime?: Date; - - /** - *
- * The name of the package that is displayed. The displayName
varies depending
- * on the package version's format. For example, if an npm package is named ui
,
- * is in the namespace vue
, and has the format npm
, then
- * the displayName
is @vue/ui
.
- *
- * The version of the package. - *
- */ - version?: string; - - /** - *- * A summary of the package version. The summary is extracted from the package. The information in and - * detail level of the summary depends on the package version's format. - *
- */ - summary?: string; - - /** - *- * The revision of the package version. - *
- */ - revision?: string; - /** *
* The format of the package version. The valid package formats are:
@@ -1637,17 +1641,15 @@ export interface PackageVersionDescription {
* maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
*
+ * nuget
: A NuGet package.
+ *
- * The name of the requested package. - *
- */ - packageName?: string; - /** ** The namespace of the package. The package component that specifies its @@ -1670,10 +1672,83 @@ export interface PackageVersionDescription { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *+ * The name of the requested package. + *
+ */ + packageName?: string; + + /** + *
+ * The name of the package that is displayed. The displayName
varies depending
+ * on the package version's format. For example, if an npm package is named ui
,
+ * is in the namespace vue
, and has the format npm
, then
+ * the displayName
is @vue/ui
.
+ *
+ * The version of the package. + *
+ */ + version?: string; + + /** + *+ * A summary of the package version. The summary is extracted from the package. The information in and + * detail level of the summary depends on the package version's format. + *
+ */ + summary?: string; + + /** + *+ * The homepage associated with the package. + *
+ */ + homePage?: string; + + /** + *+ * The repository for the source code in the package version, or the source code used to build it. + *
+ */ + sourceCodeRepository?: string; + + /** + *+ * A timestamp that contains the date and time the package version was published. + *
+ */ + publishedTime?: Date; + + /** + *+ * Information about licenses associated with the package version. + *
+ */ + licenses?: LicenseInfo[]; + + /** + *+ * The revision of the package version. + *
+ */ + revision?: string; + /** ** A string that contains the status of the package version. It can be one of the following: @@ -1707,27 +1782,6 @@ export interface PackageVersionDescription { * */ status?: PackageVersionStatus | string; - - /** - *
- * The homepage associated with the package. - *
- */ - homePage?: string; - - /** - *- * Information about licenses associated with the package version. - *
- */ - licenses?: LicenseInfo[]; - - /** - *- * The repository for the source code in the package version, or the source code used to build it. - *
- */ - sourceCodeRepository?: string; } export namespace PackageVersionDescription { @@ -1800,16 +1854,6 @@ export namespace DescribeRepositoryResult { } export interface DisassociateExternalConnectionRequest { - /** - *The name of the repository from which the external connection will be removed.
- */ - repository: string | undefined; - - /** - *The name of the external connection to be removed from the repository.
- */ - externalConnection: string | undefined; - /** *The name of the domain that contains the repository from which to remove the external * repository.
@@ -1823,6 +1867,16 @@ export interface DisassociateExternalConnectionRequest { * */ domainOwner?: string; + + /** + *The name of the repository from which the external connection will be removed.
+ */ + repository: string | undefined; + + /** + *The name of the external connection to be removed from the repository.
+ */ + externalConnection: string | undefined; } export namespace DisassociateExternalConnectionRequest { @@ -1847,6 +1901,21 @@ export namespace DisassociateExternalConnectionResult { } export interface DisposePackageVersionsRequest { + /** + *+ * The name of the domain that contains the repository you want to dispose. + *
+ */ + domain: string | undefined; + + /** + *+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + *
+ */ + domainOwner?: string; + /** ** The name of the repository that contains the package versions you want to dispose. @@ -1854,6 +1923,35 @@ export interface DisposePackageVersionsRequest { */ repository: string | undefined; + /** + *
+ * A format that specifies the type of package versions you want to dispose. The valid values are: + *
+ *
+ * npm
+ *
+ * pypi
+ *
+ * maven
+ *
+ * nuget
+ *
* The namespace of the package. The package component that specifies its @@ -1876,10 +1974,23 @@ export interface DisposePackageVersionsRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *+ * The name of the package with the versions you want to dispose. + *
+ */ + package: string | undefined; + /** ** The versions of the package you want to dispose. @@ -1889,10 +2000,10 @@ export interface DisposePackageVersionsRequest { /** *
- * The name of the package with the versions you want to dispose. + * The revisions of the package versions you want to dispose. *
*/ - package: string | undefined; + versionRevisions?: { [key: string]: string }; /** *@@ -1927,52 +2038,6 @@ export interface DisposePackageVersionsRequest { * */ expectedStatus?: PackageVersionStatus | string; - - /** - *
- * The revisions of the package versions you want to dispose. - *
- */ - versionRevisions?: { [key: string]: string }; - - /** - *- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; - - /** - *- * A format that specifies the type of package versions you want to dispose. The valid values are: - *
- *
- * npm
- *
- * pypi
- *
- * maven
- *
- * The name of the domain that contains the repository you want to dispose. - *
- */ - domain: string | undefined; } export namespace DisposePackageVersionsRequest { @@ -2036,7 +2101,14 @@ export namespace DisposePackageVersionsResult { }); } -export interface GetAuthorizationTokenRequest { +export interface GetAuthorizationTokenRequest { + /** + *+ * The name of the domain that is in scope for the generated authorization token. + *
+ */ + domain: string | undefined; + /** ** The 12-digit account number of the AWS account that owns the domain. It does not include @@ -2045,13 +2117,6 @@ export interface GetAuthorizationTokenRequest { */ domainOwner?: string; - /** - *
- * The name of the domain that is in scope for the generated authorization token. - *
- */ - domain: string | undefined; - /** *The time, in seconds, that the generated authorization token is valid. Valid values are
* 0
and any number between 900
(15 minutes) and 43200
(12 hours).
@@ -2070,17 +2135,17 @@ export namespace GetAuthorizationTokenRequest {
export interface GetAuthorizationTokenResult {
/**
*
- * A timestamp that specifies the date and time the authorization token expires. - *
+ * The returned authentication token. + * */ - expiration?: Date; + authorizationToken?: string; /** *- * The returned authentication token. - *
+ * A timestamp that specifies the date and time the authorization token expires. + * */ - authorizationToken?: string; + expiration?: Date; } export namespace GetAuthorizationTokenResult { @@ -2145,10 +2210,10 @@ export interface GetPackageVersionAssetRequest { /** *- * The name of the package version revision that contains the requested asset. + * The repository that contains the package version with the requested asset. *
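A sketch of requesting a token within the durationSeconds bounds documented above (0, or 900 through 43200 seconds), assuming the GetAuthorizationTokenCommand generated from this model; the region and domain name are placeholders.

import { CodeartifactClient, GetAuthorizationTokenCommand } from "@aws-sdk/client-codeartifact";

const client = new CodeartifactClient({ region: "us-east-1" }); // placeholder

const { authorizationToken, expiration } = await client.send(
  new GetAuthorizationTokenCommand({
    domain: "my-domain",
    durationSeconds: 900, // smallest non-zero lifetime; 0 ties the token to the caller's temporary credentials
  })
);
console.log(`token expires at ${expiration?.toISOString()}`);
// authorizationToken is typically handed to a package manager, for example as an npm auth token.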
*/ - packageVersionRevision?: string; + repository: string | undefined; /** *
@@ -2170,31 +2235,15 @@ export interface GetPackageVersionAssetRequest {
* maven
*
+ * nuget
+ *
- * The repository that contains the package version with the requested asset. - *
- */ - repository: string | undefined; - - /** - *- * The name of the package that contains the requested asset. - *
- */ - package: string | undefined; - - /** - *
- * A string that contains the package version (for example, 3.5.2
).
- *
* The namespace of the package. The package component that specifies its @@ -2217,16 +2266,43 @@ export interface GetPackageVersionAssetRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *+ * The name of the package that contains the requested asset. + *
+ */ + package: string | undefined; + + /** + *
+ * A string that contains the package version (for example, 3.5.2
).
+ *
* The name of the requested asset. *
*/ asset: string | undefined; + + /** + *+ * The name of the package version revision that contains the requested asset. + *
+ */ + packageVersionRevision?: string; } export namespace GetPackageVersionAssetRequest { @@ -2236,13 +2312,6 @@ export namespace GetPackageVersionAssetRequest { } export interface GetPackageVersionAssetResult { - /** - *- * The name of the package version revision that contains the downloaded asset. - *
- */ - packageVersionRevision?: string; - /** *The binary file, or asset, that is downloaded.
*/ @@ -2261,6 +2330,13 @@ export interface GetPackageVersionAssetResult { * */ packageVersion?: string; + + /** + *+ * The name of the package version revision that contains the downloaded asset. + *
+ */ + packageVersionRevision?: string; } export namespace GetPackageVersionAssetResult { @@ -2272,10 +2348,18 @@ export namespace GetPackageVersionAssetResult { export interface GetPackageVersionReadmeRequest { /** *- * The name of the package version that contains the requested readme file. + * The name of the domain that contains the repository that contains the package version with the requested readme file. *
*/ - package: string | undefined; + domain: string | undefined; + + /** + *+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + *
+ */ + domainOwner?: string; /** *@@ -2286,10 +2370,32 @@ export interface GetPackageVersionReadmeRequest { /** *
- * The name of the domain that contains the repository that contains the package version with the requested readme file. + * A format that specifies the type of the package version with the requested readme file. The valid values are: *
+ *
+ * npm
+ *
+ * pypi
+ *
+ * maven
+ *
+ * nuget
+ *
@@ -2313,17 +2419,22 @@ export interface GetPackageVersionReadmeRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
+ * The name of the package version that contains the requested readme file. + * */ - domainOwner?: string; + package: string | undefined; /** *@@ -2331,30 +2442,6 @@ export interface GetPackageVersionReadmeRequest { *
*/ packageVersion: string | undefined; - - /** - *- * A format that specifies the type of the package version with the requested readme file. The valid values are: - *
- *
- * npm
- *
- * pypi
- *
- * maven
- *
maven
*
*
+ *
+ * nuget
+ *
+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *- * The text of the returned readme file. + * The name of the package that contains the returned readme file. *
*/ - readme?: string; + package?: string; /** *@@ -2437,10 +2535,10 @@ export interface GetPackageVersionReadmeResult { /** *
- * The name of the package that contains the returned readme file. + * The text of the returned readme file. *
*/ - package?: string; + readme?: string; } export namespace GetPackageVersionReadmeResult { @@ -2450,6 +2548,13 @@ export namespace GetPackageVersionReadmeResult { } export interface GetRepositoryEndpointRequest { + /** + *+ * The name of the domain that contains the repository. + *
+ */ + domain: string | undefined; + /** ** The 12-digit account number of the AWS account that owns the domain that contains the repository. It does not include @@ -2458,6 +2563,13 @@ export interface GetRepositoryEndpointRequest { */ domainOwner?: string; + /** + *
+ * The name of the repository. + *
+ */ + repository: string | undefined; + /** *
* Returns which endpoint of a repository to return. A repository has one endpoint for each
@@ -2479,23 +2591,14 @@ export interface GetRepositoryEndpointRequest {
* maven
*
+ * nuget
+ *
- * The name of the repository. - *
- */ - repository: string | undefined; - - /** - *- * The name of the domain that contains the repository. - *
- */ - domain: string | undefined; } export namespace GetRepositoryEndpointRequest { @@ -2522,18 +2625,18 @@ export namespace GetRepositoryEndpointResult { export interface GetRepositoryPermissionsPolicyRequest { /** *- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. + * The name of the domain containing the repository whose associated resource policy is to be retrieved. *
*/ - domainOwner?: string; + domain: string | undefined; /** *- * The name of the domain containing the repository whose associated resource policy is to be retrieved. + * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. *
*/ - domain: string | undefined; + domainOwner?: string; /** *@@ -2594,6 +2697,13 @@ export namespace ListDomainsRequest { * objects.
*/ export interface DomainSummary { + /** + *+ * The name of the domain. + *
+ */ + name?: string; + /** ** The 12-digit account number of the AWS account that owns the domain. It does not include @@ -2609,20 +2719,6 @@ export interface DomainSummary { */ arn?: string; - /** - *
- * The name of the domain. - *
- */ - name?: string; - - /** - *- * The key used to encrypt the domain. - *
- */ - encryptionKey?: string; - /** ** A string that contains the status of the domain. The valid values are: @@ -2648,6 +2744,13 @@ export interface DomainSummary { *
*/ createdTime?: Date; + + /** + *+ * The key used to encrypt the domain. + *
+ */ + encryptionKey?: string; } export namespace DomainSummary { @@ -2680,7 +2783,22 @@ export namespace ListDomainsResult { }); } -export interface ListPackagesRequest { +export interface ListPackagesRequest { + /** + *+ * The domain that contains the repository that contains the requested list of packages. + *
+ */ + domain: string | undefined; + + /** + *+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + *
+ */ + domainOwner?: string; + /** ** The name of the repository from which packages are to be listed. @@ -2688,6 +2806,35 @@ export interface ListPackagesRequest { */ repository: string | undefined; + /** + *
+ * The format of the packages. The valid package types are: + *
+ *
+ * npm
: A Node Package Manager (npm) package.
+ *
+ * pypi
: A Python Package Index (PyPI) package.
+ *
+ * maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
+ *
+ * nuget
: A NuGet package.
+ *
* The namespace of the package. The package component that specifies its @@ -2710,21 +2857,20 @@ export interface ListPackagesRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *- * The domain that contains the repository that contains the requested list of packages. - *
- */ - domain: string | undefined; - - /** - *
- * A prefix used to filter returned repositories. Only repositories with names that start with
- * repositoryPrefix
are returned.
+ * A prefix used to filter returned packages. Only packages with names that start with
+ * packagePrefix
are returned.
*
- * The format of the packages. The valid package types are: - *
- *
- * npm
: A Node Package Manager (npm) package.
- *
- * pypi
: A Python Package Index (PyPI) package.
- *
- * maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
- *
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; } export namespace ListPackagesRequest { @@ -2812,6 +2926,11 @@ export interface PackageSummary { *maven
*
*
+ *
+ * nuget
+ *
+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *+ * The name of the domain that contains the repository associated with the package version assets. + *
+ */ + domain: string | undefined; + + /** + *+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + *
+ */ + domainOwner?: string; + + /** + *+ * The name of the repository that contains the package that contains the returned package version assets. + *
+ */ + repository: string | undefined; + /** *
* The format of the package that contains the returned package version assets. The valid package types are:
@@ -2902,18 +3049,15 @@ export interface ListPackageVersionAssetsRequest {
* maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
*
+ * nuget
: A NuGet package.
+ *
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; - /** ** The namespace of the package. The package component that specifies its @@ -2936,30 +3080,22 @@ export interface ListPackageVersionAssetsRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *- * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - *
- */ - nextToken?: string; - - /** - *- * The name of the repository that contains the package that contains the returned package version assets. - *
- */ - repository: string | undefined; - - /** - *- * The maximum number of results to return per page. + * The name of the package that contains the returned package version assets. *
*/ - maxResults?: number; + package: string | undefined; /** *@@ -2970,17 +3106,17 @@ export interface ListPackageVersionAssetsRequest { /** *
- * The name of the package that contains the returned package version assets. + * The maximum number of results to return per page. *
*/ - package: string | undefined; + maxResults?: number; /** *- * The name of the domain that contains the repository associated with the package version assets. + * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. *
*/ - domain: string | undefined; + nextToken?: string; } export namespace ListPackageVersionAssetsRequest { @@ -2992,26 +3128,10 @@ export namespace ListPackageVersionAssetsRequest { export interface ListPackageVersionAssetsResult { /** *- * The name of the package that contains the returned package version assets. - *
- */ - package?: string; - - /** - *
- * The returned list of
- * AssetSummary
- * objects.
- *
- * The version of the package associated with the returned assets. + * The format of the package that contains the returned package version assets. *
*/ - version?: string; + format?: PackageFormat | string; /** *@@ -3035,23 +3155,36 @@ export interface ListPackageVersionAssetsResult { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *- * The current revision associated with the package version. + * The name of the package that contains the returned package version assets. *
*/ - versionRevision?: string; + package?: string; /** *- * The format of the package that contains the returned package version assets. + * The version of the package associated with the returned assets. *
*/ - format?: PackageFormat | string; + version?: string; + + /** + *+ * The current revision associated with the package version. + *
+ */ + versionRevision?: string; /** *@@ -3059,6 +3192,15 @@ export interface ListPackageVersionAssetsResult { *
*/ nextToken?: string; + + /** + *
+ * The returned list of
+ * AssetSummary
+ * objects.
+ *
- * A string that contains the package version (for example, 3.5.2
).
- *
* The domain that contains the repository that contains the requested package version dependencies. @@ -3092,10 +3227,10 @@ export interface ListPackageVersionDependenciesRequest { /** *
- * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - *
+ * The name of the repository that contains the requested package version. + * */ - nextToken?: string; + repository: string | undefined; /** *
@@ -3117,24 +3252,15 @@ export interface ListPackageVersionDependenciesRequest {
* maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
*
+ * nuget
: A NuGet package.
+ *
- * The name of the repository that contains the requested package version. - *
- */ - repository: string | undefined; - - /** - *- * The name of the package versions' package. - *
- */ - package: string | undefined; - /** ** The namespace of the package. The package component that specifies its @@ -3157,9 +3283,36 @@ export interface ListPackageVersionDependenciesRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *+ * The name of the package versions' package. + *
+ */ + package: string | undefined; + + /** + *
+ * A string that contains the package version (for example, 3.5.2
).
+ *
+ * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + *
+ */ + nextToken?: string; } export namespace ListPackageVersionDependenciesRequest { @@ -3174,20 +3327,6 @@ export namespace ListPackageVersionDependenciesRequest { * */ export interface PackageDependency { - /** - * The type of a package dependency. The possible values depend on the package type.
- * Example types are compile
, runtime
, and test
for Maven
- * packages, and dev
, prod
, and optional
for npm packages.
- * The name of the package that this package depends on. - *
- */ - package?: string; - /** ** The namespace of the package. The package component that specifies its @@ -3210,10 +3349,30 @@ export interface PackageDependency { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *+ * The name of the package that this package depends on. + *
+ */ + package?: string; + + /** + * The type of a package dependency. The possible values depend on the package type.
+ * Example types are compile
, runtime
, and test
for Maven
+ * packages, and dev
, prod
, and optional
for npm packages.
* The required version, or version range, of the package that this package depends on. The version format
@@ -3251,6 +3410,11 @@ export interface ListPackageVersionDependenciesResult {
* maven
*
+ * nuget
+ *
+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *- * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - *
+ * The name of the package that contains the returned package versions dependencies. + * */ - nextToken?: string; + package?: string; /** *@@ -3297,17 +3467,17 @@ export interface ListPackageVersionDependenciesResult { /** *
- * The name of the package that contains the returned package versions dependencies. + * The current revision associated with the package version. *
*/ - package?: string; + versionRevision?: string; /** *- * The current revision associated with the package version. - *
+ * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + * */ - versionRevision?: string; + nextToken?: string; /** *@@ -3339,10 +3509,11 @@ export interface ListPackageVersionsRequest { /** *
- * The name of the package for which you want to return a list of package versions. - *
+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + * */ - package: string | undefined; + domainOwner?: string; /** *@@ -3351,13 +3522,6 @@ export interface ListPackageVersionsRequest { */ repository: string | undefined; - /** - *
- * How to sort the returned list of package versions. - *
- */ - sortBy?: PackageVersionSortType | string; - /** *
* The format of the returned packages. The valid package types are:
@@ -3378,31 +3542,53 @@ export interface ListPackageVersionsRequest {
* maven
: A Maven package that contains compiled code in a distributable format, such as a JAR file.
*
+ * nuget
: A NuGet package.
+ *
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; - - /** - *- * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. - *
+ * The namespace of the package. The package component that specifies its + * namespace depends on its type. For example: + * + *
+ * The namespace of a Maven package is its groupId
.
+ *
+ * The namespace of an npm package is its scope
.
+ *
+ * A Python package does not contain a corresponding component, so + * Python packages do not have a namespace. + *
+ *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *- * The maximum number of results to return per page. - *
+ * The name of the package for which you want to return a list of package versions. + * */ - maxResults?: number; + package: string | undefined; /** *@@ -3440,29 +3626,24 @@ export interface ListPackageVersionsRequest { /** *
- * The namespace of the package. The package component that specifies its - * namespace depends on its type. For example: + * How to sort the returned list of package versions. *
- *
- * The namespace of a Maven package is its groupId
.
- *
- * The namespace of an npm package is its scope
.
- *
- * A Python package does not contain a corresponding component, so - * Python packages do not have a namespace. - *
- *+ * The maximum number of results to return per page. + *
+ */ + maxResults?: number; + + /** + *+ * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + *
+ */ + nextToken?: string; } export namespace ListPackageVersionsRequest { @@ -3481,6 +3662,13 @@ export namespace ListPackageVersionsRequest { * */ export interface PackageVersionSummary { + /** + *+ * Information about a package version. + *
+ */ + version: string | undefined; + /** ** The revision associated with a package version. @@ -3521,13 +3709,6 @@ export interface PackageVersionSummary { * */ status: PackageVersionStatus | string | undefined; - - /** - *
- * Information about a package version. - *
- */ - version: string | undefined; } export namespace PackageVersionSummary { @@ -3539,14 +3720,23 @@ export namespace PackageVersionSummary { export interface ListPackageVersionsResult { /** *
- * The returned list of
- *
- * PackageVersionSummary
- *
- * objects.
- *
+ * For Maven and PyPI packages, it's the most recently published package version. + *
+ *
+ * For npm packages, it's the version referenced by the
+ * latest
tag. If the latest
tag is not set, it's the most recently published package version.
+ *
@@ -3568,36 +3758,14 @@ export interface ListPackageVersionsResult {
* maven
*
- * If there are additional results, this is the token for the next set of results. - *
- */ - nextToken?: string; - - /** - *- * The default package version to display. This depends on the package format: - *
- *- * For Maven and PyPI packages, it's the most recently published package version. - *
- *
- * For npm packages, it's the version referenced by the
- * latest
tag. If the latest
tag is not set, it's the most recently published package version.
- *
nuget
+ *
* @@ -3621,6 +3789,12 @@ export interface ListPackageVersionsResult { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *
+ * The returned list of
+ *
+ * PackageVersionSummary
+ *
+ * objects.
+ *
+ * If there are additional results, this is the token for the next set of results. + *
+ */ + nextToken?: string; } export namespace ListPackageVersionsResult { @@ -3641,24 +3833,24 @@ export namespace ListPackageVersionsResult { export interface ListRepositoriesRequest { /** - *- * The maximum number of results to return per page. - *
+ * A prefix used to filter returned repositories. Only repositories with names that start
+ * with repositoryPrefix
are returned.
- * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + * The maximum number of results to return per page. *
*/ - nextToken?: string; + maxResults?: number; /** - * A prefix used to filter returned repositories. Only repositories with names that start
- * with repositoryPrefix
are returned.
+ * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + *
*/ - repositoryPrefix?: string; + nextToken?: string; } export namespace ListRepositoriesRequest { @@ -3677,44 +3869,44 @@ export namespace ListRepositoriesRequest { export interface RepositorySummary { /** *- * The name of the domain that contains the repository. + * The name of the repository. *
*/ - domainName?: string; + name?: string; /** *- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. + * The AWS account ID that manages the repository. *
*/ - domainOwner?: string; - - /** - *- * The description of the repository. - *
- */ - description?: string; + administratorAccount?: string; /** *- * The name of the repository. + * The name of the domain that contains the repository. *
*/ - name?: string; + domainName?: string; /** *- * The AWS account ID that manages the repository. + * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. *
*/ - administratorAccount?: string; + domainOwner?: string; /** *The ARN of the repository.
*/ arn?: string; + + /** + *+ * The description of the repository. + *
+ */ + description?: string; } export namespace RepositorySummary { @@ -3724,13 +3916,6 @@ export namespace RepositorySummary { } export interface ListRepositoriesResult { - /** - *- * If there are additional results, this is the token for the next set of results. - *
- */ - nextToken?: string; - /** ** The returned list of @@ -3740,6 +3925,13 @@ export interface ListRepositoriesResult { *
*/ repositories?: RepositorySummary[]; + + /** + *+ * If there are additional results, this is the token for the next set of results. + *
+ */ + nextToken?: string; } export namespace ListRepositoriesResult { @@ -3751,17 +3943,25 @@ export namespace ListRepositoriesResult { export interface ListRepositoriesInDomainRequest { /** *- * Filter the list of repositories to only include those that are managed by the AWS account ID. + * The name of the domain that contains the returned list of repositories. *
*/ - administratorAccount?: string; + domain: string | undefined; /** *- * The name of the domain that contains the returned list of repositories. + * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + *
+ */ + domainOwner?: string; + + /** + *+ * Filter the list of repositories to only include those that are managed by the AWS account ID. *
*/ - domain: string | undefined; + administratorAccount?: string; /** *@@ -3771,14 +3971,6 @@ export interface ListRepositoriesInDomainRequest { */ repositoryPrefix?: string; - /** - *
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; - /** ** The maximum number of results to return per page. @@ -3803,17 +3995,17 @@ export namespace ListRepositoriesInDomainRequest { export interface ListRepositoriesInDomainResult { /** *
- * If there are additional results, this is the token for the next set of results. - *
+ * The returned list of repositories. + * */ - nextToken?: string; + repositories?: RepositorySummary[]; /** *- * The returned list of repositories. - *
+ * If there are additional results, this is the token for the next set of results. + * */ - repositories?: RepositorySummary[]; + nextToken?: string; } export namespace ListRepositoriesInDomainResult { @@ -3851,11 +4043,10 @@ export namespace ListTagsForResourceResult { export interface PutDomainPermissionsPolicyRequest { /** *- * The current revision of the resource policy to be set. This revision is used for optimistic locking, which - * prevents others from overwriting your changes to the domain's resource policy. + * The name of the domain on which to set the resource policy. *
*/ - policyRevision?: string; + domain: string | undefined; /** *@@ -3866,17 +4057,18 @@ export interface PutDomainPermissionsPolicyRequest { domainOwner?: string; /** - *
A valid displayable JSON Aspen policy string to be set as the access control resource - * policy on the provided domain.
+ *+ * The current revision of the resource policy to be set. This revision is used for optimistic locking, which + * prevents others from overwriting your changes to the domain's resource policy. + *
*/ - policyDocument: string | undefined; + policyRevision?: string; /** - *- * The name of the domain on which to set the resource policy. - *
+ *A valid displayable JSON Aspen policy string to be set as the access control resource + * policy on the provided domain.
*/ - domain: string | undefined; + policyDocument: string | undefined; } export namespace PutDomainPermissionsPolicyRequest { @@ -3901,12 +4093,10 @@ export namespace PutDomainPermissionsPolicyResult { export interface PutRepositoryPermissionsPolicyRequest { /** *- * Sets the revision of the resource policy that specifies permissions to access the repository. - * This revision is used for optimistic locking, which prevents others from overwriting your - * changes to the repository's resource policy. + * The name of the domain containing the repository to set the resource policy on. *
*/ - policyRevision?: string; + domain: string | undefined; /** *@@ -3923,10 +4113,12 @@ export interface PutRepositoryPermissionsPolicyRequest { /** *
- * The name of the domain containing the repository to set the resource policy on. + * Sets the revision of the resource policy that specifies permissions to access the repository. + * This revision is used for optimistic locking, which prevents others from overwriting your + * changes to the repository's resource policy. *
*/ - domain: string | undefined; + policyRevision?: string; /** *A valid displayable JSON Aspen policy string to be set as the access control resource @@ -3956,14 +4148,14 @@ export namespace PutRepositoryPermissionsPolicyResult { export interface TagResourceRequest { /** - *
The tags you want to modify or add to the resource.
+ *The Amazon Resource Name (ARN) of the resource that you want to add or update tags for.
*/ - tags: Tag[] | undefined; + resourceArn: string | undefined; /** - *The Amazon Resource Name (ARN) of the resource to which you want to add or update tags.
+ *The tags you want to modify or add to the resource.
*/ - resourceArn: string | undefined; + tags: Tag[] | undefined; } export namespace TagResourceRequest { @@ -3982,7 +4174,7 @@ export namespace TagResourceResult { export interface UntagResourceRequest { /** - *The Amazon Resource Name (ARN) of the resource to which you want to remove tags.
+ *The Amazon Resource Name (ARN) of the resource that you want to remove tags from.
*/ resourceArn: string | undefined; @@ -4016,39 +4208,18 @@ export interface UpdatePackageVersionsStatusRequest { /** *- * An array of strings that specify the versions of the package with the statuses to update. - *
- */ - versions: string[] | undefined; - - /** - *- * The name of the package with the version statuses to update. - *
+ * The 12-digit account number of the AWS account that owns the domain. It does not include + * dashes or spaces. + * */ - package: string | undefined; + domainOwner?: string; /** *- * The status you want to change the package version status to. + * The repository that contains the package versions with the status you want to update. *
*/ - targetStatus: PackageVersionStatus | string | undefined; - - /** - * A map of package versions and package version revisions. The map key
is the
- * package version (for example, 3.5.2
), and the map value
is the
- * package version revision.
- * The 12-digit account number of the AWS account that owns the domain. It does not include - * dashes or spaces. - *
- */ - domainOwner?: string; + repository: string | undefined; /** *
@@ -4070,25 +4241,15 @@ export interface UpdatePackageVersionsStatusRequest {
* maven
*
+ * nuget
+ *
The package version’s expected status before it is updated. If
- * expectedStatus
is provided, the package version's status is updated only if its
- * status at the time UpdatePackageVersionsStatus
is called matches
- * expectedStatus
.
- * The repository that contains the package versions with the status you want to update. - *
- */ - repository: string | undefined; - /** ** The namespace of the package. The package component that specifies its @@ -4111,9 +4272,51 @@ export interface UpdatePackageVersionsStatusRequest { * Python packages do not have a namespace. *
* + *+ * A NuGet package does not contain a corresponding component, so + * NuGet packages do not have a namespace. + *
+ *+ * The name of the package with the version statuses to update. + *
+ */ + package: string | undefined; + + /** + *+ * An array of strings that specify the versions of the package with the statuses to update. + *
+ */ + versions: string[] | undefined; + + /** + * A map of package versions and package version revisions. The map key
is the
+ * package version (for example, 3.5.2
), and the map value
is the
+ * package version revision.
The package version’s expected status before it is updated. If
+ * expectedStatus
is provided, the package version's status is updated only if its
+ * status at the time UpdatePackageVersionsStatus
is called matches
+ * expectedStatus
.
+ * The status you want to change the package version status to. + *
+ */ + targetStatus: PackageVersionStatus | string | undefined; } export namespace UpdatePackageVersionsStatusRequest { @@ -4123,12 +4326,6 @@ export namespace UpdatePackageVersionsStatusRequest { } export interface UpdatePackageVersionsStatusResult { - /** - * A list of SuccessfulPackageVersionInfo
objects, one for each package version
- * with a status that successfully updated.
* A list of PackageVersionError
objects, one for each package version with
@@ -4136,6 +4333,12 @@ export interface UpdatePackageVersionsStatusResult {
*
A list of SuccessfulPackageVersionInfo
objects, one for each package version
+ * with a status that successfully updated.
A list of upstream repositories to associate with the repository. The order of the upstream repositories - * in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more - * information, see Working with upstream repositories.
- */ - upstreams?: UpstreamRepository[]; - /** ** The name of the domain associated with the repository to update. @@ -4159,13 +4355,6 @@ export interface UpdateRepositoryRequest { */ domain: string | undefined; - /** - *
- * An updated repository description. - *
- */ - description?: string; - /** ** The 12-digit account number of the AWS account that owns the domain. It does not include @@ -4180,6 +4369,20 @@ export interface UpdateRepositoryRequest { *
*/ repository: string | undefined; + + /** + *+ * An updated repository description. + *
+ */ + description?: string; + + /** + *A list of upstream repositories to associate with the repository. The order of the upstream repositories + * in the list determines their priority order when AWS CodeArtifact looks for a requested package version. For more + * information, see Working with upstream repositories.
+ */ + upstreams?: UpstreamRepository[]; } export namespace UpdateRepositoryRequest { diff --git a/clients/client-codeartifact/protocols/Aws_restJson1.ts b/clients/client-codeartifact/protocols/Aws_restJson1.ts index f0f7ee4f8f2c..24ead115efb4 100644 --- a/clients/client-codeartifact/protocols/Aws_restJson1.ts +++ b/clients/client-codeartifact/protocols/Aws_restJson1.ts @@ -146,10 +146,10 @@ export const serializeAws_restJson1AssociateExternalConnectionCommand = async ( }; let resolvedPath = "/v1/repository/external-connection"; const query: any = { - ...(input.externalConnection !== undefined && { "external-connection": input.externalConnection }), - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.repository !== undefined && { repository: input.repository }), + ...(input.externalConnection !== undefined && { "external-connection": input.externalConnection }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -174,13 +174,13 @@ export const serializeAws_restJson1CopyPackageVersionsCommand = async ( }; let resolvedPath = "/v1/package/versions/copy"; const query: any = { - ...(input.sourceRepository !== undefined && { "source-repository": input.sourceRepository }), ...(input.domain !== undefined && { domain: input.domain }), - ...(input.package !== undefined && { package: input.package }), - ...(input.format !== undefined && { format: input.format }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.namespace !== undefined && { namespace: input.namespace }), + ...(input.sourceRepository !== undefined && { "source-repository": input.sourceRepository }), ...(input.destinationRepository !== undefined && { "destination-repository": input.destinationRepository }), + ...(input.format !== undefined && { format: input.format }), + ...(input.namespace !== undefined && { namespace: input.namespace }), + ...(input.package !== undefined && { package: input.package }), }; let body: any; body = JSON.stringify({ @@ -244,9 +244,9 @@ export const serializeAws_restJson1CreateRepositoryCommand = async ( }; let resolvedPath = "/v1/repository"; const query: any = { - ...(input.repository !== undefined && { repository: input.repository }), - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), + ...(input.repository !== undefined && { repository: input.repository }), }; let body: any; body = JSON.stringify({ @@ -278,8 +278,8 @@ export const serializeAws_restJson1DeleteDomainCommand = async ( }; let resolvedPath = "/v1/domain"; const query: any = { - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -304,9 +304,9 @@ export const serializeAws_restJson1DeleteDomainPermissionsPolicyCommand = async }; let resolvedPath = "/v1/domain/permissions/policy"; const query: any = { - ...(input.policyRevision !== undefined && { "policy-revision": input.policyRevision }), - ...(input.domainOwner !== undefined && { "domain-owner": 
input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), + ...(input.policyRevision !== undefined && { "policy-revision": input.policyRevision }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -331,12 +331,12 @@ export const serializeAws_restJson1DeletePackageVersionsCommand = async ( }; let resolvedPath = "/v1/package/versions/delete"; const query: any = { - ...(input.package !== undefined && { package: input.package }), - ...(input.repository !== undefined && { repository: input.repository }), - ...(input.namespace !== undefined && { namespace: input.namespace }), - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), + ...(input.repository !== undefined && { repository: input.repository }), ...(input.format !== undefined && { format: input.format }), + ...(input.namespace !== undefined && { namespace: input.namespace }), + ...(input.package !== undefined && { package: input.package }), }; let body: any; body = JSON.stringify({ @@ -367,8 +367,8 @@ export const serializeAws_restJson1DeleteRepositoryCommand = async ( }; let resolvedPath = "/v1/repository"; const query: any = { - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.repository !== undefined && { repository: input.repository }), }; let body: any; @@ -394,10 +394,10 @@ export const serializeAws_restJson1DeleteRepositoryPermissionsPolicyCommand = as }; let resolvedPath = "/v1/repository/permissions/policies"; const query: any = { - ...(input.repository !== undefined && { repository: input.repository }), - ...(input.policyRevision !== undefined && { "policy-revision": input.policyRevision }), ...(input.domain !== undefined && { domain: input.domain }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), + ...(input.repository !== undefined && { repository: input.repository }), + ...(input.policyRevision !== undefined && { "policy-revision": input.policyRevision }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -422,8 +422,8 @@ export const serializeAws_restJson1DescribeDomainCommand = async ( }; let resolvedPath = "/v1/domain"; const query: any = { - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -449,12 +449,12 @@ export const serializeAws_restJson1DescribePackageVersionCommand = async ( let resolvedPath = "/v1/package/version"; const query: any = { ...(input.domain !== undefined && { domain: input.domain }), - ...(input.packageVersion !== undefined && { version: input.packageVersion }), - ...(input.package !== undefined && { package: input.package }), - ...(input.format !== undefined && { format: input.format }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.namespace !== undefined && { namespace: input.namespace }), ...(input.repository !== undefined && 
{ repository: input.repository }), + ...(input.format !== undefined && { format: input.format }), + ...(input.namespace !== undefined && { namespace: input.namespace }), + ...(input.package !== undefined && { package: input.package }), + ...(input.packageVersion !== undefined && { version: input.packageVersion }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -506,10 +506,10 @@ export const serializeAws_restJson1DisassociateExternalConnectionCommand = async }; let resolvedPath = "/v1/repository/external-connection"; const query: any = { - ...(input.repository !== undefined && { repository: input.repository }), - ...(input.externalConnection !== undefined && { "external-connection": input.externalConnection }), ...(input.domain !== undefined && { domain: input.domain }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), + ...(input.repository !== undefined && { repository: input.repository }), + ...(input.externalConnection !== undefined && { "external-connection": input.externalConnection }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -534,12 +534,12 @@ export const serializeAws_restJson1DisposePackageVersionsCommand = async ( }; let resolvedPath = "/v1/package/versions/dispose"; const query: any = { + ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.repository !== undefined && { repository: input.repository }), + ...(input.format !== undefined && { format: input.format }), ...(input.namespace !== undefined && { namespace: input.namespace }), ...(input.package !== undefined && { package: input.package }), - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.format !== undefined && { format: input.format }), - ...(input.domain !== undefined && { domain: input.domain }), }; let body: any; body = JSON.stringify({ @@ -573,8 +573,8 @@ export const serializeAws_restJson1GetAuthorizationTokenCommand = async ( }; let resolvedPath = "/v1/authorization-token"; const query: any = { - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.durationSeconds !== undefined && { duration: input.durationSeconds.toString() }), }; let body: any; @@ -628,13 +628,13 @@ export const serializeAws_restJson1GetPackageVersionAssetCommand = async ( const query: any = { ...(input.domain !== undefined && { domain: input.domain }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.packageVersionRevision !== undefined && { revision: input.packageVersionRevision }), - ...(input.format !== undefined && { format: input.format }), ...(input.repository !== undefined && { repository: input.repository }), + ...(input.format !== undefined && { format: input.format }), + ...(input.namespace !== undefined && { namespace: input.namespace }), ...(input.package !== undefined && { package: input.package }), ...(input.packageVersion !== undefined && { version: input.packageVersion }), - ...(input.namespace !== undefined && { namespace: input.namespace }), ...(input.asset !== undefined && { asset: input.asset }), + ...(input.packageVersionRevision !== undefined && { revision: input.packageVersionRevision }), }; let body: any; const { hostname, protocol = 
"https", port } = await context.endpoint(); @@ -659,13 +659,13 @@ export const serializeAws_restJson1GetPackageVersionReadmeCommand = async ( }; let resolvedPath = "/v1/package/version/readme"; const query: any = { - ...(input.package !== undefined && { package: input.package }), - ...(input.repository !== undefined && { repository: input.repository }), ...(input.domain !== undefined && { domain: input.domain }), - ...(input.namespace !== undefined && { namespace: input.namespace }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.packageVersion !== undefined && { version: input.packageVersion }), + ...(input.repository !== undefined && { repository: input.repository }), ...(input.format !== undefined && { format: input.format }), + ...(input.namespace !== undefined && { namespace: input.namespace }), + ...(input.package !== undefined && { package: input.package }), + ...(input.packageVersion !== undefined && { version: input.packageVersion }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -690,10 +690,10 @@ export const serializeAws_restJson1GetRepositoryEndpointCommand = async ( }; let resolvedPath = "/v1/repository/endpoint"; const query: any = { + ...(input.domain !== undefined && { domain: input.domain }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.format !== undefined && { format: input.format }), ...(input.repository !== undefined && { repository: input.repository }), - ...(input.domain !== undefined && { domain: input.domain }), + ...(input.format !== undefined && { format: input.format }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -718,8 +718,8 @@ export const serializeAws_restJson1GetRepositoryPermissionsPolicyCommand = async }; let resolvedPath = "/v1/repository/permissions/policy"; const query: any = { - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.repository !== undefined && { repository: input.repository }), }; let body: any; @@ -770,14 +770,14 @@ export const serializeAws_restJson1ListPackagesCommand = async ( }; let resolvedPath = "/v1/packages"; const query: any = { + ...(input.domain !== undefined && { domain: input.domain }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.repository !== undefined && { repository: input.repository }), + ...(input.format !== undefined && { format: input.format }), ...(input.namespace !== undefined && { namespace: input.namespace }), - ...(input.domain !== undefined && { domain: input.domain }), ...(input.packagePrefix !== undefined && { "package-prefix": input.packagePrefix }), ...(input.maxResults !== undefined && { "max-results": input.maxResults.toString() }), ...(input.nextToken !== undefined && { "next-token": input.nextToken }), - ...(input.format !== undefined && { format: input.format }), - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -802,15 +802,15 @@ export const serializeAws_restJson1ListPackageVersionAssetsCommand = async ( }; let resolvedPath = "/v1/package/version/assets"; const query: any = { - ...(input.format !== undefined && { format: input.format }), + ...(input.domain !== 
undefined && { domain: input.domain }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.namespace !== undefined && { namespace: input.namespace }), - ...(input.nextToken !== undefined && { "next-token": input.nextToken }), ...(input.repository !== undefined && { repository: input.repository }), - ...(input.maxResults !== undefined && { "max-results": input.maxResults.toString() }), - ...(input.packageVersion !== undefined && { version: input.packageVersion }), + ...(input.format !== undefined && { format: input.format }), + ...(input.namespace !== undefined && { namespace: input.namespace }), ...(input.package !== undefined && { package: input.package }), - ...(input.domain !== undefined && { domain: input.domain }), + ...(input.packageVersion !== undefined && { version: input.packageVersion }), + ...(input.maxResults !== undefined && { "max-results": input.maxResults.toString() }), + ...(input.nextToken !== undefined && { "next-token": input.nextToken }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -835,14 +835,14 @@ export const serializeAws_restJson1ListPackageVersionDependenciesCommand = async }; let resolvedPath = "/v1/package/version/dependencies"; const query: any = { - ...(input.packageVersion !== undefined && { version: input.packageVersion }), ...(input.domain !== undefined && { domain: input.domain }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.nextToken !== undefined && { "next-token": input.nextToken }), - ...(input.format !== undefined && { format: input.format }), ...(input.repository !== undefined && { repository: input.repository }), - ...(input.package !== undefined && { package: input.package }), + ...(input.format !== undefined && { format: input.format }), ...(input.namespace !== undefined && { namespace: input.namespace }), + ...(input.package !== undefined && { package: input.package }), + ...(input.packageVersion !== undefined && { version: input.packageVersion }), + ...(input.nextToken !== undefined && { "next-token": input.nextToken }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -868,15 +868,15 @@ export const serializeAws_restJson1ListPackageVersionsCommand = async ( let resolvedPath = "/v1/package/versions"; const query: any = { ...(input.domain !== undefined && { domain: input.domain }), - ...(input.package !== undefined && { package: input.package }), + ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.repository !== undefined && { repository: input.repository }), - ...(input.sortBy !== undefined && { sortBy: input.sortBy }), ...(input.format !== undefined && { format: input.format }), - ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.nextToken !== undefined && { "next-token": input.nextToken }), - ...(input.maxResults !== undefined && { "max-results": input.maxResults.toString() }), - ...(input.status !== undefined && { status: input.status }), ...(input.namespace !== undefined && { namespace: input.namespace }), + ...(input.package !== undefined && { package: input.package }), + ...(input.status !== undefined && { status: input.status }), + ...(input.sortBy !== undefined && { sortBy: input.sortBy }), + ...(input.maxResults !== undefined && { "max-results": input.maxResults.toString() }), + ...(input.nextToken !== undefined && { "next-token": input.nextToken }), }; let body: any; const { 
hostname, protocol = "https", port } = await context.endpoint(); @@ -901,9 +901,9 @@ export const serializeAws_restJson1ListRepositoriesCommand = async ( }; let resolvedPath = "/v1/repositories"; const query: any = { + ...(input.repositoryPrefix !== undefined && { "repository-prefix": input.repositoryPrefix }), ...(input.maxResults !== undefined && { "max-results": input.maxResults.toString() }), ...(input.nextToken !== undefined && { "next-token": input.nextToken }), - ...(input.repositoryPrefix !== undefined && { "repository-prefix": input.repositoryPrefix }), }; let body: any; const { hostname, protocol = "https", port } = await context.endpoint(); @@ -928,10 +928,10 @@ export const serializeAws_restJson1ListRepositoriesInDomainCommand = async ( }; let resolvedPath = "/v1/domain/repositories"; const query: any = { - ...(input.administratorAccount !== undefined && { "administrator-account": input.administratorAccount }), ...(input.domain !== undefined && { domain: input.domain }), - ...(input.repositoryPrefix !== undefined && { "repository-prefix": input.repositoryPrefix }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), + ...(input.administratorAccount !== undefined && { "administrator-account": input.administratorAccount }), + ...(input.repositoryPrefix !== undefined && { "repository-prefix": input.repositoryPrefix }), ...(input.maxResults !== undefined && { "max-results": input.maxResults.toString() }), ...(input.nextToken !== undefined && { "next-token": input.nextToken }), }; @@ -1010,9 +1010,9 @@ export const serializeAws_restJson1PutRepositoryPermissionsPolicyCommand = async }; let resolvedPath = "/v1/repository/permissions/policy"; const query: any = { + ...(input.domain !== undefined && { domain: input.domain }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), ...(input.repository !== undefined && { repository: input.repository }), - ...(input.domain !== undefined && { domain: input.domain }), }; let body: any; body = JSON.stringify({ @@ -1098,11 +1098,11 @@ export const serializeAws_restJson1UpdatePackageVersionsStatusCommand = async ( let resolvedPath = "/v1/package/versions/update_status"; const query: any = { ...(input.domain !== undefined && { domain: input.domain }), - ...(input.package !== undefined && { package: input.package }), ...(input.domainOwner !== undefined && { "domain-owner": input.domainOwner }), - ...(input.format !== undefined && { format: input.format }), ...(input.repository !== undefined && { repository: input.repository }), + ...(input.format !== undefined && { format: input.format }), ...(input.namespace !== undefined && { namespace: input.namespace }), + ...(input.package !== undefined && { package: input.package }), }; let body: any; body = JSON.stringify({ @@ -2720,15 +2720,15 @@ export const deserializeAws_restJson1GetPackageVersionAssetCommand = async ( packageVersion: undefined, packageVersionRevision: undefined, }; - if (output.headers["x-packageversionrevision"] !== undefined) { - contents.packageVersionRevision = output.headers["x-packageversionrevision"]; - } if (output.headers["x-assetname"] !== undefined) { contents.assetName = output.headers["x-assetname"]; } if (output.headers["x-packageversion"] !== undefined) { contents.packageVersion = output.headers["x-packageversion"]; } + if (output.headers["x-packageversionrevision"] !== undefined) { + contents.packageVersionRevision = output.headers["x-packageversionrevision"]; + } const data: any = output.body; contents.asset = data; 
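Aside from the new upstreams field on UpdateRepositoryRequest shown earlier, the CodeArtifact changes above only re-order how query parameters are serialized, so request behavior is unchanged. A minimal sketch of setting upstream repositories with the updated client follows; the domain and repository names are invented for illustration, and priority follows list order as the field's documentation describes.

import { CodeartifactClient, UpdateRepositoryCommand } from "@aws-sdk/client-codeartifact";

const codeartifact = new CodeartifactClient({ region: "us-west-2" });

async function setUpstreams(): Promise<void> {
  // "npm-store" is consulted before "team-shared" when CodeArtifact resolves a
  // requested package version, because it appears first in the list.
  await codeartifact.send(
    new UpdateRepositoryCommand({
      domain: "my-domain",
      repository: "app-packages",
      description: "Application packages",
      upstreams: [{ repositoryName: "npm-store" }, { repositoryName: "team-shared" }],
    })
  );
}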
return Promise.resolve(contents); diff --git a/clients/client-codebuild/CodeBuild.ts b/clients/client-codebuild/CodeBuild.ts index 29d053899f39..2570e190a261 100644 --- a/clients/client-codebuild/CodeBuild.ts +++ b/clients/client-codebuild/CodeBuild.ts @@ -89,6 +89,11 @@ import { DescribeTestCasesCommandInput, DescribeTestCasesCommandOutput, } from "./commands/DescribeTestCasesCommand"; +import { + GetReportGroupTrendCommand, + GetReportGroupTrendCommandInput, + GetReportGroupTrendCommandOutput, +} from "./commands/GetReportGroupTrendCommand"; import { GetResourcePolicyCommand, GetResourcePolicyCommandInput, @@ -980,6 +985,35 @@ export class CodeBuild extends CodeBuildClient { } } + public getReportGroupTrend( + args: GetReportGroupTrendCommandInput, + options?: __HttpHandlerOptions + ): PromiseGets a resource policy that is identified by its resource ARN.
*/ diff --git a/clients/client-codebuild/CodeBuildClient.ts b/clients/client-codebuild/CodeBuildClient.ts index e42a7be9551c..6dec21c93f22 100644 --- a/clients/client-codebuild/CodeBuildClient.ts +++ b/clients/client-codebuild/CodeBuildClient.ts @@ -31,6 +31,10 @@ import { DescribeCodeCoveragesCommandOutput, } from "./commands/DescribeCodeCoveragesCommand"; import { DescribeTestCasesCommandInput, DescribeTestCasesCommandOutput } from "./commands/DescribeTestCasesCommand"; +import { + GetReportGroupTrendCommandInput, + GetReportGroupTrendCommandOutput, +} from "./commands/GetReportGroupTrendCommand"; import { GetResourcePolicyCommandInput, GetResourcePolicyCommandOutput } from "./commands/GetResourcePolicyCommand"; import { ImportSourceCredentialsCommandInput, @@ -148,6 +152,7 @@ export type ServiceInputTypes = | DeleteWebhookCommandInput | DescribeCodeCoveragesCommandInput | DescribeTestCasesCommandInput + | GetReportGroupTrendCommandInput | GetResourcePolicyCommandInput | ImportSourceCredentialsCommandInput | InvalidateProjectCacheCommandInput @@ -193,6 +198,7 @@ export type ServiceOutputTypes = | DeleteWebhookCommandOutput | DescribeCodeCoveragesCommandOutput | DescribeTestCasesCommandOutput + | GetReportGroupTrendCommandOutput | GetResourcePolicyCommandOutput | ImportSourceCredentialsCommandOutput | InvalidateProjectCacheCommandOutput diff --git a/clients/client-codebuild/commands/GetReportGroupTrendCommand.ts b/clients/client-codebuild/commands/GetReportGroupTrendCommand.ts new file mode 100644 index 000000000000..e2e0286329e1 --- /dev/null +++ b/clients/client-codebuild/commands/GetReportGroupTrendCommand.ts @@ -0,0 +1,85 @@ +import { CodeBuildClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CodeBuildClient"; +import { GetReportGroupTrendInput, GetReportGroupTrendOutput } from "../models/models_0"; +import { + deserializeAws_json1_1GetReportGroupTrendCommand, + serializeAws_json1_1GetReportGroupTrendCommand, +} from "../protocols/Aws_json1_1"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export type GetReportGroupTrendCommandInput = GetReportGroupTrendInput; +export type GetReportGroupTrendCommandOutput = GetReportGroupTrendOutput & __MetadataBearer; + +export class GetReportGroupTrendCommand extends $Command< + GetReportGroupTrendCommandInput, + GetReportGroupTrendCommandOutput, + CodeBuildClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetReportGroupTrendCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe ARN of the resource that is associated with the resource policy.
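The hunks above register the new GetReportGroupTrend operation on the CodeBuild client and add its command class. A rough usage sketch follows; the input fields (reportGroupArn, numOfReports, trendField) and the stats/rawData output fields are assumptions inferred from the imported GetReportGroupTrendInput/Output, ReportGroupTrendStats, and ReportWithRawData shapes, which this diff does not expand.

import { CodeBuildClient, GetReportGroupTrendCommand } from "@aws-sdk/client-codebuild";

const codebuild = new CodeBuildClient({ region: "us-east-1" });

async function passRateTrend(reportGroupArn: string) {
  // stats would hold the aggregated ReportGroupTrendStats; rawData the per-report
  // ReportWithRawData values the trend was computed from (assumed output field names).
  const { stats, rawData } = await codebuild.send(
    new GetReportGroupTrendCommand({
      reportGroupArn,
      numOfReports: 25,
      trendField: "PASS_RATE",
    })
  );
  return { stats, rawData };
}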
diff --git a/clients/client-codebuild/protocols/Aws_json1_1.ts b/clients/client-codebuild/protocols/Aws_json1_1.ts index da0046d1b475..0c094497a211 100644 --- a/clients/client-codebuild/protocols/Aws_json1_1.ts +++ b/clients/client-codebuild/protocols/Aws_json1_1.ts @@ -31,6 +31,10 @@ import { DescribeCodeCoveragesCommandOutput, } from "../commands/DescribeCodeCoveragesCommand"; import { DescribeTestCasesCommandInput, DescribeTestCasesCommandOutput } from "../commands/DescribeTestCasesCommand"; +import { + GetReportGroupTrendCommandInput, + GetReportGroupTrendCommandOutput, +} from "../commands/GetReportGroupTrendCommand"; import { GetResourcePolicyCommandInput, GetResourcePolicyCommandOutput } from "../commands/GetResourcePolicyCommand"; import { ImportSourceCredentialsCommandInput, @@ -139,6 +143,8 @@ import { EnvironmentPlatform, EnvironmentVariable, ExportedEnvironmentVariable, + GetReportGroupTrendInput, + GetReportGroupTrendOutput, GetResourcePolicyInput, GetResourcePolicyOutput, GitSubmodulesConfig, @@ -192,6 +198,8 @@ import { ReportExportConfig, ReportFilter, ReportGroup, + ReportGroupTrendStats, + ReportWithRawData, ResolvedArtifact, ResourceAlreadyExistsException, ResourceNotFoundException, @@ -469,6 +477,19 @@ export const serializeAws_json1_1DescribeTestCasesCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1GetReportGroupTrendCommand = async ( + input: GetReportGroupTrendCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "Content-Type": "application/x-amz-json-1.1", + "X-Amz-Target": "CodeBuild_20161006.GetReportGroupTrend", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetReportGroupTrendInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1GetResourcePolicyCommand = async ( input: GetResourcePolicyCommandInput, context: __SerdeContext @@ -1872,6 +1893,69 @@ const deserializeAws_json1_1DescribeTestCasesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1GetReportGroupTrendCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseThe CodeStar Connections feature is in preview release and is subject to change.
- *This AWS CodeStar Connections API Reference provides descriptions and usage examples of * the operations and data types for the AWS CodeStar Connections API. You can use the * connections API to work with connections and installations.
@@ -53,7 +51,7 @@ import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; * associated with a unique ARN that is used to reference the connection. *When you create a connection, the console initiates a third-party connection handshake. * Installations are the apps that are used to conduct this handshake. For - * example, the installation for the Bitbucket provider type is the Bitbucket Cloud app. When you + * example, the installation for the Bitbucket provider type is the Bitbucket app. When you * create a connection, you can choose an existing installation or create one.
*When you want to create a connection to an installed provider type such as GitHub * Enterprise Server, you create a host for your connections.
@@ -457,4 +455,30 @@ export class CodeStarConnections extends CodeStarConnectionsClient { return this.send(command, optionsOrCb); } } + + /** + *Updates a specified host with the provided configurations.
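A corresponding sketch of the new updateHost operation; the host ARN and endpoint values are placeholders, and the input shape (HostArn, ProviderEndpoint, VpcConfiguration) matches the UpdateHostInput added further down in models_0.ts.

import { CodeStarConnectionsClient, UpdateHostCommand } from "@aws-sdk/client-codestar-connections";

const client = new CodeStarConnectionsClient({ region: "us-east-1" });

async function moveHostEndpoint(hostArn: string) {
  // UpdateHostOutput is empty; a resolved promise simply means the update was accepted.
  await client.send(
    new UpdateHostCommand({
      HostArn: hostArn,
      ProviderEndpoint: "https://ghes-new.example.com",
      // VpcConfiguration (VpcId, SubnetIds, SecurityGroupIds, TlsCertificate) could be
      // supplied here as well when the host sits behind a VPC.
    })
  );
}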
+ */ + public updateHost(args: UpdateHostCommandInput, options?: __HttpHandlerOptions): PromiseThe CodeStar Connections feature is in preview release and is subject to change.
- *This AWS CodeStar Connections API Reference provides descriptions and usage examples of * the operations and data types for the AWS CodeStar Connections API. You can use the * connections API to work with connections and installations.
@@ -216,7 +216,7 @@ export type CodeStarConnectionsClientResolvedConfig = __SmithyResolvedConfigurat * associated with a unique ARN that is used to reference the connection. *When you create a connection, the console initiates a third-party connection handshake. * Installations are the apps that are used to conduct this handshake. For - * example, the installation for the Bitbucket provider type is the Bitbucket Cloud app. When you + * example, the installation for the Bitbucket provider type is the Bitbucket app. When you * create a connection, you can choose an existing installation or create one.
*When you want to create a connection to an installed provider type such as GitHub * Enterprise Server, you create a host for your connections.
diff --git a/clients/client-codestar-connections/commands/UpdateHostCommand.ts b/clients/client-codestar-connections/commands/UpdateHostCommand.ts new file mode 100644 index 000000000000..107cdd630585 --- /dev/null +++ b/clients/client-codestar-connections/commands/UpdateHostCommand.ts @@ -0,0 +1,92 @@ +import { + CodeStarConnectionsClientResolvedConfig, + ServiceInputTypes, + ServiceOutputTypes, +} from "../CodeStarConnectionsClient"; +import { UpdateHostInput, UpdateHostOutput } from "../models/models_0"; +import { + deserializeAws_json1_0UpdateHostCommand, + serializeAws_json1_0UpdateHostCommand, +} from "../protocols/Aws_json1_0"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export type UpdateHostCommandInput = UpdateHostInput; +export type UpdateHostCommandOutput = UpdateHostOutput & __MetadataBearer; + +/** + *Updates a specified host with the provided configurations.
+ */ +export class UpdateHostCommand extends $Command< + UpdateHostCommandInput, + UpdateHostCommandOutput, + CodeStarConnectionsClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateHostCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe tag's value.
+ *The tag's key.
*/ - Value: string | undefined; + Key: string | undefined; /** - *The tag's key.
+ *The tag's value.
*/ - Key: string | undefined; + Value: string | undefined; } export namespace Tag { @@ -31,9 +31,10 @@ export namespace Tag { export interface CreateConnectionInput { /** - *The key-value pair to use when tagging the resource.
+ *The name of the external provider where your third-party code repository is + * configured.
*/ - Tags?: Tag[]; + ProviderType?: ProviderType | string; /** *The name of the connection to be created. The name must be unique in the calling AWS @@ -42,10 +43,9 @@ export interface CreateConnectionInput { ConnectionName: string | undefined; /** - *
The name of the external provider where your third-party code repository is configured. - * The valid provider type is Bitbucket.
+ *The key-value pair to use when tagging the resource.
*/ - ProviderType?: ProviderType | string; + Tags?: Tag[]; /** *The Amazon Resource Name (ARN) of the host associated with the connection to be created.
@@ -131,9 +131,10 @@ export namespace ResourceUnavailableException { */ export interface VpcConfiguration { /** - *The value of the Transport Layer Security (TLS) certificate associated with the infrastructure where your provider type is installed.
+ *The ID of the Amazon VPC connected to the infrastructure where your provider type is + * installed.
*/ - TlsCertificate?: string; + VpcId: string | undefined; /** *The ID of the subnet or subnets associated with the Amazon VPC connected to the @@ -148,10 +149,9 @@ export interface VpcConfiguration { SecurityGroupIds: string[] | undefined; /** - *
The ID of the Amazon VPC connected to the infrastructure where your provider type is - * installed.
+ *The value of the Transport Layer Security (TLS) certificate associated with the infrastructure where your provider type is installed.
*/ - VpcId: string | undefined; + TlsCertificate?: string; } export namespace VpcConfiguration { @@ -161,6 +161,12 @@ export namespace VpcConfiguration { } export interface CreateHostInput { + /** + *The name of the host to be created. The name must be unique in the calling AWS + * account.
+ */ + Name: string | undefined; + /** *The name of the installed provider to be associated with your connection. The host * resource represents the infrastructure where your provider type is installed. The valid @@ -169,22 +175,16 @@ export interface CreateHostInput { ProviderType: ProviderType | string | undefined; /** - *
The name of the host to be created. The name must be unique in the calling AWS - * account.
+ *The endpoint of the infrastructure to be represented by the host after it is + * created.
*/ - Name: string | undefined; + ProviderEndpoint: string | undefined; /** *The VPC configuration to be provisioned for the host. A VPC must be configured and the * infrastructure to be represented by the host must already be connected to the VPC.
*/ VpcConfiguration?: VpcConfiguration; - - /** - *The endpoint of the infrastructure to be represented by the host after it is - * created.
- */ - ProviderEndpoint: string | undefined; } export namespace CreateHostInput { @@ -276,6 +276,11 @@ export enum ConnectionStatus { * connection in the console. */ export interface Connection { + /** + *The name of the connection. Connection names must be unique in an AWS user account.
+ */ + ConnectionName?: string; + /** *The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection * reference when the connection is shared between AWS services.
@@ -286,14 +291,16 @@ export interface Connection { ConnectionArn?: string; /** - *The name of the connection. Connection names must be unique in an AWS user account.
+ *The name of the external provider where your third-party code repository is + * configured.
*/ - ConnectionName?: string; + ProviderType?: ProviderType | string; /** - *The Amazon Resource Name (ARN) of the host associated with the connection.
+ *The identifier of the external provider where your third-party code repository is configured. + * For Bitbucket, this is the account ID of the owner of the Bitbucket repository.
*/ - HostArn?: string; + OwnerAccountId?: string; /** *The current status of the connection.
@@ -301,16 +308,9 @@ export interface Connection { ConnectionStatus?: ConnectionStatus | string; /** - *The name of the external provider where your third-party code repository is configured. - * The valid provider type is Bitbucket.
- */ - ProviderType?: ProviderType | string; - - /** - *The identifier of the external provider where your third-party code repository is configured. - * For Bitbucket, this is the account ID of the owner of the Bitbucket repository.
+ *The Amazon Resource Name (ARN) of the host associated with the connection.
*/ - OwnerAccountId?: string; + HostArn?: string; } export namespace Connection { @@ -347,14 +347,19 @@ export namespace GetHostInput { export interface GetHostOutput { /** - *The provider type of the requested host, such as GitHub Enterprise Server.
+ *The name of the requested host.
*/ - ProviderType?: ProviderType | string; + Name?: string; /** - *The name of the requested host.
+ *The status of the requested host.
*/ - Name?: string; + Status?: string; + + /** + *The provider type of the requested host, such as GitHub Enterprise Server.
+ */ + ProviderType?: ProviderType | string; /** *The endpoint of the infrastructure represented by the requested host.
@@ -365,11 +370,6 @@ export interface GetHostOutput { *The VPC configuration of the requested host.
*/ VpcConfiguration?: VpcConfiguration; - - /** - *The status of the requested host.
- */ - Status?: string; } export namespace GetHostOutput { @@ -385,6 +385,11 @@ export interface ListConnectionsInput { */ ProviderTypeFilter?: ProviderType | string; + /** + *Filters the list of connections to those associated with a specified host.
+ */ + HostArnFilter?: string; + /** *The maximum number of results to return in a single call. To retrieve the remaining
* results, make another call with the returned nextToken
value.
Filters the list of connections to those associated with a specified host.
- */ - HostArnFilter?: string; } export namespace ListConnectionsInput { @@ -461,9 +461,14 @@ export namespace ListHostsInput { */ export interface Host { /** - *The status description for the host.
+ *The name of the host.
*/ - StatusMessage?: string; + Name?: string; + + /** + *The Amazon Resource Name (ARN) of the host.
+ */ + HostArn?: string; /** *The name of the installed provider to be associated with your connection. The host @@ -473,14 +478,9 @@ export interface Host { ProviderType?: ProviderType | string; /** - *
The name of the host.
- */ - Name?: string; - - /** - *The Amazon Resource Name (ARN) of the host.
+ *The endpoint of the infrastructure where your provider type is installed.
*/ - HostArn?: string; + ProviderEndpoint?: string; /** *The VPC configuration provisioned for the host.
@@ -493,9 +493,9 @@ export interface Host { Status?: string; /** - *The endpoint of the infrastructure where your provider type is installed.
+ *The status description for the host.
*/ - ProviderEndpoint?: string; + StatusMessage?: string; } export namespace Host { @@ -602,3 +602,65 @@ export namespace UntagResourceOutput { ...obj, }); } + +/** + *Two conflicting operations have been made on the same resource.
+ */ +export interface ConflictException extends __SmithyException, $MetadataBearer { + name: "ConflictException"; + $fault: "client"; + Message?: string; +} + +export namespace ConflictException { + export const filterSensitiveLog = (obj: ConflictException): any => ({ + ...obj, + }); +} + +/** + *The operation is not supported. Check the connection status and try again.
+ */ +export interface UnsupportedOperationException extends __SmithyException, $MetadataBearer { + name: "UnsupportedOperationException"; + $fault: "client"; + Message?: string; +} + +export namespace UnsupportedOperationException { + export const filterSensitiveLog = (obj: UnsupportedOperationException): any => ({ + ...obj, + }); +} + +export interface UpdateHostInput { + /** + *The Amazon Resource Name (ARN) of the host to be updated.
+ */ + HostArn: string | undefined; + + /** + *The URL or endpoint of the host to be updated.
+ */ + ProviderEndpoint?: string; + + /** + *The VPC configuration of the host to be updated. A VPC must be configured and the + * infrastructure to be represented by the host must already be connected to the VPC.
+ */ + VpcConfiguration?: VpcConfiguration; +} + +export namespace UpdateHostInput { + export const filterSensitiveLog = (obj: UpdateHostInput): any => ({ + ...obj, + }); +} + +export interface UpdateHostOutput {} + +export namespace UpdateHostOutput { + export const filterSensitiveLog = (obj: UpdateHostOutput): any => ({ + ...obj, + }); +} diff --git a/clients/client-codestar-connections/protocols/Aws_json1_0.ts b/clients/client-codestar-connections/protocols/Aws_json1_0.ts index 2851a9995bce..997cdc3b3ccd 100644 --- a/clients/client-codestar-connections/protocols/Aws_json1_0.ts +++ b/clients/client-codestar-connections/protocols/Aws_json1_0.ts @@ -12,7 +12,9 @@ import { } from "../commands/ListTagsForResourceCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { UpdateHostCommandInput, UpdateHostCommandOutput } from "../commands/UpdateHostCommand"; import { + ConflictException, Connection, CreateConnectionInput, CreateConnectionOutput, @@ -39,8 +41,11 @@ import { Tag, TagResourceInput, TagResourceOutput, + UnsupportedOperationException, UntagResourceInput, UntagResourceOutput, + UpdateHostInput, + UpdateHostOutput, VpcConfiguration, } from "../models/models_0"; import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; @@ -196,6 +201,19 @@ export const serializeAws_json1_0UntagResourceCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_0UpdateHostCommand = async ( + input: UpdateHostCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "Content-Type": "application/x-amz-json-1.0", + "X-Amz-Target": "CodeStar_connections_20191201.UpdateHost", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0UpdateHostInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const deserializeAws_json1_0CreateConnectionCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -541,6 +559,14 @@ const deserializeAws_json1_0GetHostCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceUnavailableException": + case "com.amazonaws.codestarconnections#ResourceUnavailableException": + response = { + ...(await deserializeAws_json1_0ResourceUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -825,6 +851,100 @@ const deserializeAws_json1_0UntagResourceCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_0UpdateHostCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseAccount takeover action type.
*/ export interface AccountTakeoverActionType { + /** + *Flag specifying whether to send a notification.
+ */ + Notify: boolean | undefined; + /** *The event action.
*Flag specifying whether to send a notification.
- */ - Notify: boolean | undefined; } export namespace AccountTakeoverActionType { @@ -97,11 +97,6 @@ export namespace AccountTakeoverActionType { *Account takeover actions type.
*/ export interface AccountTakeoverActionsType { - /** - *Action to take for a high risk.
- */ - HighAction?: AccountTakeoverActionType; - /** *Action to take for a low risk.
*/ @@ -111,6 +106,11 @@ export interface AccountTakeoverActionsType { *Action to take for a medium risk.
*/ MediumAction?: AccountTakeoverActionType; + + /** + *Action to take for a high risk.
+ */ + HighAction?: AccountTakeoverActionType; } export namespace AccountTakeoverActionsType { @@ -124,14 +124,14 @@ export namespace AccountTakeoverActionsType { */ export interface NotifyEmailType { /** - *The HTML body.
+ *The subject.
*/ - HtmlBody?: string; + Subject: string | undefined; /** - *The subject.
+ *The HTML body.
*/ - Subject: string | undefined; + HtmlBody?: string; /** *The text body.
@@ -150,9 +150,15 @@ export namespace NotifyEmailType { */ export interface NotifyConfigurationType { /** - *The email template used when a detected risk event is allowed.
+ *The email address that is sending the email. It must be either individually verified + * with Amazon SES, or from a domain that has been verified with Amazon SES.
*/ - NoActionEmail?: NotifyEmailType; + From?: string; + + /** + *The destination to which the receiver of an email should reply to.
+ */ + ReplyTo?: string; /** *The Amazon Resource Name (ARN) of the identity that is associated with the sending @@ -162,25 +168,19 @@ export interface NotifyConfigurationType { SourceArn: string | undefined; /** - *
The destination to which the receiver of an email should reply to.
- */ - ReplyTo?: string; - - /** - *The MFA email template used when MFA is challenged as part of a detected risk.
+ *Email template used when a detected risk event is blocked.
*/ - MfaEmail?: NotifyEmailType; + BlockEmail?: NotifyEmailType; /** - *The email address that is sending the email. It must be either individually verified - * with Amazon SES, or from a domain that has been verified with Amazon SES.
+ *The email template used when a detected risk event is allowed.
*/ - From?: string; + NoActionEmail?: NotifyEmailType; /** - *Email template used when a detected risk event is blocked.
+ *The MFA email template used when MFA is challenged as part of a detected risk.
*/ - BlockEmail?: NotifyEmailType; + MfaEmail?: NotifyEmailType; } export namespace NotifyConfigurationType { @@ -195,14 +195,14 @@ export namespace NotifyConfigurationType { */ export interface AccountTakeoverRiskConfigurationType { /** - *Account takeover risk configuration actions
+ *The notify configuration used to construct email notifications.
*/ - Actions: AccountTakeoverActionsType | undefined; + NotifyConfiguration?: NotifyConfigurationType; /** - *The notify configuration used to construct email notifications.
+ *Account takeover risk configuration actions
*/ - NotifyConfiguration?: NotifyConfigurationType; + Actions: AccountTakeoverActionsType | undefined; } export namespace AccountTakeoverRiskConfigurationType { @@ -265,32 +265,15 @@ export namespace StringAttributeConstraintsType { */ export interface SchemaAttributeType { /** - *Specifies whether the value of the attribute can be changed.
- *For any user pool attribute that's mapped to an identity provider attribute, you must
- * set this parameter to true
. Amazon Cognito updates mapped attributes when
- * users sign in to your application through an identity provider. If an attribute is
- * immutable, Amazon Cognito throws an error when it attempts to update the attribute. For
- * more information, see Specifying Identity Provider Attribute Mappings for Your User
- * Pool.
Specifies the constraints for an attribute of the string type.
+ *A schema attribute of the name type.
*/ - StringAttributeConstraints?: StringAttributeConstraintsType; + Name?: string; /** *The attribute data type.
*/ AttributeDataType?: AttributeDataType | string; - /** - *Specifies whether a user pool attribute is required. If the attribute is required and - * the user does not provide a value, registration or sign-in will fail.
- */ - Required?: boolean; - /** *We recommend that you use WriteAttributes in the user pool client to control how attributes can @@ -306,14 +289,31 @@ export interface SchemaAttributeType { DeveloperOnlyAttribute?: boolean; /** - *
A schema attribute of the name type.
+ *Specifies whether the value of the attribute can be changed.
+ *For any user pool attribute that's mapped to an identity provider attribute, you must
+ * set this parameter to true
. Amazon Cognito updates mapped attributes when
+ * users sign in to your application through an identity provider. If an attribute is
+ * immutable, Amazon Cognito throws an error when it attempts to update the attribute. For
+ * more information, see Specifying Identity Provider Attribute Mappings for Your User
+ * Pool.
Specifies whether a user pool attribute is required. If the attribute is required and + * the user does not provide a value, registration or sign-in will fail.
+ */ + Required?: boolean; /** *Specifies the constraints for an attribute of the number type.
*/ NumberAttributeConstraints?: NumberAttributeConstraintsType; + + /** + *Specifies the constraints for an attribute of the string type.
+ */ + StringAttributeConstraints?: StringAttributeConstraintsType; } export namespace SchemaAttributeType { @@ -327,14 +327,14 @@ export namespace SchemaAttributeType { */ export interface AddCustomAttributesRequest { /** - *An array of custom attributes, such as Mutable and Name.
+ *The user pool ID for the user pool where you want to add custom attributes.
*/ - CustomAttributes: SchemaAttributeType[] | undefined; + UserPoolId: string | undefined; /** - *The user pool ID for the user pool where you want to add custom attributes.
+ *An array of custom attributes, such as Mutable and Name.
*/ - UserPoolId: string | undefined; + CustomAttributes: SchemaAttributeType[] | undefined; } export namespace AddCustomAttributesRequest { @@ -472,6 +472,11 @@ export namespace UserImportInProgressException { } export interface AdminAddUserToGroupRequest { + /** + *The user pool ID for the user pool.
+ */ + UserPoolId: string | undefined; + /** *The username for the user.
*/ @@ -481,11 +486,6 @@ export interface AdminAddUserToGroupRequest { *The group name.
*/ GroupName: string | undefined; - - /** - *The user pool ID for the user pool.
- */ - UserPoolId: string | undefined; } export namespace AdminAddUserToGroupRequest { @@ -717,6 +717,11 @@ export namespace AttributeType { *Represents the request to create a user in the specified user pool.
*/ export interface AdminCreateUserRequest { + /** + *The user pool ID for the user pool where the user will be created.
+ */ + UserPoolId: string | undefined; + /** *The username for the user. Must be unique within the user pool. Must be a UTF-8 string * between 1 and 128 characters. After the user is created, the username cannot be @@ -725,24 +730,42 @@ export interface AdminCreateUserRequest { Username: string | undefined; /** - *
Specify "EMAIL"
if email will be used to send the welcome message.
- * Specify "SMS"
if the phone number will be used. The default value is
- * "SMS"
. More than one value can be specified.
The user pool ID for the user pool where the user will be created.
- */ - UserPoolId: string | undefined; - - /** - *Set to "RESEND"
to resend the invitation message to a user that already
- * exists and reset the expiration limit on the user's account. Set to
- * "SUPPRESS"
to suppress sending the message. Only one value can be
- * specified.
An array of name-value pairs that contain user attributes and attribute values to be
+ * set for the user to be created. You can create a user without specifying any attributes
+ * other than Username
. However, any attributes that you specify as required
+ * (when creating a user pool or in the Attributes tab of the console) must be supplied either by you (in your
+ * call to AdminCreateUser
) or by the user (when he or she signs up in
+ * response to your welcome message).
For custom attributes, you must prepend the custom:
prefix to the
+ * attribute name.
To send a message inviting the user to sign up, you must specify the user's email + * address or phone number. This can be done in your call to AdminCreateUser or in the + * Users tab of the Amazon Cognito console for + * managing your user pools.
+ *In your call to AdminCreateUser
, you can set the
+ * email_verified
attribute to True
, and you can set the
+ * phone_number_verified
attribute to True
. (You can also do
+ * this by calling AdminUpdateUserAttributes.)
+ * email: The email address of the user to whom
+ * the message that contains the code and username will be sent. Required if the
+ * email_verified
attribute is set to True
, or if
+ * "EMAIL"
is specified in the DesiredDeliveryMediums
+ * parameter.
+ * phone_number: The phone number of the user to
+ * whom the message that contains the code and username will be sent. Required if
+ * the phone_number_verified
attribute is set to True
, or
+ * if "SMS"
is specified in the DesiredDeliveryMediums
+ * parameter.
The user's validation data. This is an array of name-value pairs that contain user @@ -786,42 +809,19 @@ export interface AdminCreateUserRequest { ForceAliasCreation?: boolean; /** - *
An array of name-value pairs that contain user attributes and attribute values to be
- * set for the user to be created. You can create a user without specifying any attributes
- * other than Username
. However, any attributes that you specify as required
- * (when creating a user pool or in the Attributes tab of the console) must be supplied either by you (in your
- * call to AdminCreateUser
) or by the user (when he or she signs up in
- * response to your welcome message).
For custom attributes, you must prepend the custom:
prefix to the
- * attribute name.
To send a message inviting the user to sign up, you must specify the user's email - * address or phone number. This can be done in your call to AdminCreateUser or in the - * Users tab of the Amazon Cognito console for - * managing your user pools.
- *In your call to AdminCreateUser
, you can set the
- * email_verified
attribute to True
, and you can set the
- * phone_number_verified
attribute to True
. (You can also do
- * this by calling AdminUpdateUserAttributes.)
- * email: The email address of the user to whom
- * the message that contains the code and username will be sent. Required if the
- * email_verified
attribute is set to True
, or if
- * "EMAIL"
is specified in the DesiredDeliveryMediums
- * parameter.
- * phone_number: The phone number of the user to
- * whom the message that contains the code and username will be sent. Required if
- * the phone_number_verified
attribute is set to True
, or
- * if "SMS"
is specified in the DesiredDeliveryMediums
- * parameter.
Set to "RESEND"
to resend the invitation message to a user that already
+ * exists and reset the expiration limit on the user's account. Set to
+ * "SUPPRESS"
to suppress sending the message. Only one value can be
+ * specified.
Specify "EMAIL"
if email will be used to send the welcome message.
+ * Specify "SMS"
if the phone number will be used. The default value is
+ * "SMS"
. More than one value can be specified.
A map of custom key-value pairs that you can provide as input for any custom workflows @@ -863,13 +863,13 @@ export namespace AdminCreateUserRequest { export const filterSensitiveLog = (obj: AdminCreateUserRequest): any => ({ ...obj, ...(obj.Username && { Username: SENSITIVE_STRING }), + ...(obj.UserAttributes && { + UserAttributes: obj.UserAttributes.map((item) => AttributeType.filterSensitiveLog(item)), + }), ...(obj.ValidationData && { ValidationData: obj.ValidationData.map((item) => AttributeType.filterSensitiveLog(item)), }), ...(obj.TemporaryPassword && { TemporaryPassword: SENSITIVE_STRING }), - ...(obj.UserAttributes && { - UserAttributes: obj.UserAttributes.map((item) => AttributeType.filterSensitiveLog(item)), - }), }); } @@ -880,17 +880,17 @@ export namespace AdminCreateUserRequest { * configurations.
*/ export interface MFAOptionType { - /** - *The attribute name of the MFA option type. The only valid value is
- * phone_number
.
The delivery medium to send the MFA code. You can use this parameter to set only the
* SMS
delivery medium value.
The attribute name of the MFA option type. The only valid value is
+ * phone_number
.
Specifies whether the user is enabled.
+ *The user name of the user you wish to describe.
*/ - Enabled?: boolean; + Username?: string; /** *A container with information about the user type attributes.
@@ -929,13 +929,18 @@ export interface UserType { UserCreateDate?: Date; /** - *The user name of the user you wish to describe.
+ *The last modified date of the user.
*/ - Username?: string; + UserLastModifiedDate?: Date; /** - *The user status. Can be one of the following:
- *Specifies whether the user is enabled.
+ */ + Enabled?: boolean; + + /** + *The user status. Can be one of the following:
+ *UNCONFIRMED - User has been created but not confirmed.
*The MFA options for the user.
*/ MFAOptions?: MFAOptionType[]; - - /** - *The last modified date of the user.
- */ - UserLastModifiedDate?: Date; } export namespace UserType { export const filterSensitiveLog = (obj: UserType): any => ({ ...obj, - ...(obj.Attributes && { Attributes: obj.Attributes.map((item) => AttributeType.filterSensitiveLog(item)) }), ...(obj.Username && { Username: SENSITIVE_STRING }), + ...(obj.Attributes && { Attributes: obj.Attributes.map((item) => AttributeType.filterSensitiveLog(item)) }), }); } @@ -1139,20 +1139,20 @@ export namespace UsernameExistsException { *The message template structure.
*/ export interface MessageTemplateType { - /** - *The subject line for email messages.
- */ - EmailSubject?: string; - /** *The message template for SMS messages.
*/ SMSMessage?: string; /** - *The message template for email messages.
+ *The message template for email messages. EmailMessage is allowed only if EmailSendingAccount is DEVELOPER.
*/ EmailMessage?: string; + + /** + *The subject line for email messages. EmailSubject is allowed only if EmailSendingAccount is DEVELOPER.
+ */ + EmailSubject?: string; } export namespace MessageTemplateType { @@ -1171,12 +1171,6 @@ export interface AdminCreateUserConfigType { */ AllowAdminCreateUserOnly?: boolean; - /** - *The message template to be used for the welcome message to new users.
- *See also Customizing User Invitation Messages.
- */ - InviteMessageTemplate?: MessageTemplateType; - /** *The user account expiration limit, in days, after which the account is no longer * usable. To reset the account after that time limit, you must call @@ -1190,6 +1184,12 @@ export interface AdminCreateUserConfigType { *
The message template to be used for the welcome message to new users.
+ *See also Customizing User Invitation Messages.
+ */ + InviteMessageTemplate?: MessageTemplateType; } export namespace AdminCreateUserConfigType { @@ -1225,14 +1225,14 @@ export namespace AdminDeleteUserRequest { */ export interface AdminDeleteUserAttributesRequest { /** - *The user name of the user from which you would like to delete attributes.
+ *The user pool ID for the user pool where you want to delete user attributes.
*/ - Username: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID for the user pool where you want to delete user attributes.
+ *The user name of the user from which you would like to delete attributes.
*/ - UserPoolId: string | undefined; + Username: string | undefined; /** *An array of strings representing the user attribute names you wish to delete.
@@ -1265,12 +1265,6 @@ export namespace AdminDeleteUserAttributesResponse { *A container for information about an identity provider for a user pool.
*/ export interface ProviderUserIdentifierType { - /** - *The value of the provider attribute to link to, for example,
- * xxxxx_account
.
The name of the provider, for example, Facebook, Google, or Login with Amazon.
*/ @@ -1281,6 +1275,12 @@ export interface ProviderUserIdentifierType { *NameID
.
*/
ProviderAttributeName?: string;
+
+ /**
+ * The value of the provider attribute to link to, for example,
+ * xxxxx_account
.
The user name of the user you wish to disable.
+ *The user pool ID for the user pool where you want to disable the user.
*/ - Username: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID for the user pool where you want to disable the user.
+ *The user name of the user you wish to disable.
*/ - UserPoolId: string | undefined; + Username: string | undefined; } export namespace AdminDisableUserRequest { @@ -1408,9 +1408,9 @@ export namespace AdminEnableUserResponse { */ export interface AdminForgetDeviceRequest { /** - *The device key.
+ *The user pool ID.
*/ - DeviceKey: string | undefined; + UserPoolId: string | undefined; /** *The user name.
@@ -1418,9 +1418,9 @@ export interface AdminForgetDeviceRequest { Username: string | undefined; /** - *The user pool ID.
+ *The device key.
*/ - UserPoolId: string | undefined; + DeviceKey: string | undefined; } export namespace AdminForgetDeviceRequest { @@ -1458,14 +1458,14 @@ export interface AdminGetDeviceRequest { DeviceKey: string | undefined; /** - *The user name.
+ *The user pool ID.
*/ - Username: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID.
+ *The user name.
*/ - UserPoolId: string | undefined; + Username: string | undefined; } export namespace AdminGetDeviceRequest { @@ -1480,9 +1480,14 @@ export namespace AdminGetDeviceRequest { */ export interface DeviceType { /** - *The last modified date of the device.
+ *The device key.
*/ - DeviceLastModifiedDate?: Date; + DeviceKey?: string; + + /** + *The device attributes.
+ */ + DeviceAttributes?: AttributeType[]; /** *The creation date of the device.
@@ -1490,19 +1495,14 @@ export interface DeviceType { DeviceCreateDate?: Date; /** - *The device key.
+ *The last modified date of the device.
*/ - DeviceKey?: string; + DeviceLastModifiedDate?: Date; /** *The date in which the device was last authenticated.
*/ DeviceLastAuthenticatedDate?: Date; - - /** - *The device attributes.
- */ - DeviceAttributes?: AttributeType[]; } export namespace DeviceType { @@ -1560,19 +1560,14 @@ export namespace AdminGetUserRequest { */ export interface AdminGetUserResponse { /** - *- * This response parameter is no longer supported. It provides - * information only about SMS MFA configurations. It doesn't provide information about TOTP - * software token MFA configurations. To look up information about either type of MFA - * configuration, use UserMFASettingList instead.
+ *The user name of the user about whom you are receiving information.
*/ - MFAOptions?: MFAOptionType[]; + Username: string | undefined; /** - *The MFA options that are enabled for the user. The possible values in this list are
- * SMS_MFA
and SOFTWARE_TOKEN_MFA
.
An array of name-value pairs representing user attributes.
*/ - UserMFASettingList?: string[]; + UserAttributes?: AttributeType[]; /** *The date the user was created.
@@ -1580,30 +1575,15 @@ export interface AdminGetUserResponse { UserCreateDate?: Date; /** - *An array of name-value pairs representing user attributes.
+ *The date the user was last modified.
*/ - UserAttributes?: AttributeType[]; + UserLastModifiedDate?: Date; /** *Indicates that the status is enabled.
*/ Enabled?: boolean; - /** - *The user name of the user about whom you are receiving information.
- */ - Username: string | undefined; - - /** - *The user's preferred MFA setting.
- */ - PreferredMfaSetting?: string; - - /** - *The date the user was last modified.
- */ - UserLastModifiedDate?: Date; - /** *The user status. Can be one of the following:
*+ * This response parameter is no longer supported. It provides + * information only about SMS MFA configurations. It doesn't provide information about TOTP + * software token MFA configurations. To look up information about either type of MFA + * configuration, use UserMFASettingList instead.
+ */ + MFAOptions?: MFAOptionType[]; + + /** + *The user's preferred MFA setting.
+ */ + PreferredMfaSetting?: string; + + /** + *The MFA options that are enabled for the user. The possible values in this list are
+ * SMS_MFA
and SOFTWARE_TOKEN_MFA
.
The header value.
+ *The header name
*/ - headerValue?: string; + headerName?: string; /** - *The header name
+ *The header value.
*/ - headerName?: string; + headerValue?: string; } export namespace HttpHeader { @@ -1703,11 +1703,21 @@ export namespace HttpHeader { * Amazon Cognito advanced security. */ export interface ContextDataType { + /** + *Source IP address of your user.
+ */ + IpAddress: string | undefined; + /** *Your server endpoint where this API is invoked.
*/ ServerName: string | undefined; + /** + *Your server path where this API is invoked.
+ */ + ServerPath: string | undefined; + /** *HttpHeaders received on your server in same order.
*/ @@ -1718,16 +1728,6 @@ export interface ContextDataType { * Cognito context data collection library. */ EncodedData?: string; - - /** - *Your server path where this API is invoked.
- */ - ServerPath: string | undefined; - - /** - *Source IP address of your user.
- */ - IpAddress: string | undefined; } export namespace ContextDataType { @@ -1741,84 +1741,14 @@ export namespace ContextDataType { */ export interface AdminInitiateAuthRequest { /** - *The analytics metadata for collecting Amazon Pinpoint metrics for
- * AdminInitiateAuth
calls.
The ID of the Amazon Cognito user pool.
*/ - AnalyticsMetadata?: AnalyticsMetadataType; + UserPoolId: string | undefined; /** - *A map of custom key-value pairs that you can provide as input for certain custom - * workflows that this action triggers.
- *You create custom workflows by assigning AWS Lambda functions to user pool triggers. - * When you use the AdminInitiateAuth API action, Amazon Cognito invokes the AWS Lambda - * functions that are specified for various triggers. The ClientMetadata value is passed as - * input to the functions for only the following triggers:
- *Pre signup
- *Pre authentication
- *User migration
- *When Amazon Cognito invokes the functions for these triggers, it passes a JSON
- * payload, which the function receives as input. This payload contains a
- * validationData
attribute, which provides the data that you assigned to
- * the ClientMetadata parameter in your AdminInitiateAuth request. In your function code in
- * AWS Lambda, you can process the validationData
value to enhance your
- * workflow for your specific needs.
When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the - * functions for the following triggers, but it does not provide the ClientMetadata value - * as input:
- * - *Post authentication
- *Custom message
- *Pre token generation
- *Create auth challenge
- *Define auth challenge
- *Verify auth challenge
- *For more information, see Customizing User Pool Workflows with Lambda Triggers in the - * Amazon Cognito Developer Guide.
- *Take the following limitations into consideration when you use the ClientMetadata - * parameter:
- *Amazon Cognito does not store the ClientMetadata value. This data is - * available only to AWS Lambda triggers that are assigned to a user pool to - * support custom workflows. If your user pool configuration does not include - * triggers, the ClientMetadata parameter serves no purpose.
- *Amazon Cognito does not validate the ClientMetadata value.
- *Amazon Cognito does not encrypt the the ClientMetadata value, so don't use - * it to provide sensitive information.
- *The app client ID.
*/ - ClientMetadata?: { [key: string]: string }; + ClientId: string | undefined; /** *The authentication flow for this call to execute. The API action will depend on this @@ -1882,18 +1812,6 @@ export interface AdminInitiateAuthRequest { */ AuthFlow: AuthFlowType | string | undefined; - /** - *
Contextual data such as the user's device fingerprint, IP address, or location used - * for evaluating the risk of an unexpected event by Amazon Cognito advanced - * security.
- */ - ContextData?: ContextDataType; - - /** - *The ID of the Amazon Cognito user pool.
- */ - UserPoolId: string | undefined; - /** *The authentication parameters. These are inputs corresponding to the
* AuthFlow
that you are invoking. The required values depend on the value
@@ -1926,16 +1844,98 @@ export interface AdminInitiateAuthRequest {
AuthParameters?: { [key: string]: string };
/**
- *
The app client ID.
+ *A map of custom key-value pairs that you can provide as input for certain custom + * workflows that this action triggers.
+ *You create custom workflows by assigning AWS Lambda functions to user pool triggers. + * When you use the AdminInitiateAuth API action, Amazon Cognito invokes the AWS Lambda + * functions that are specified for various triggers. The ClientMetadata value is passed as + * input to the functions for only the following triggers:
+ *Pre signup
+ *Pre authentication
+ *User migration
+ *When Amazon Cognito invokes the functions for these triggers, it passes a JSON
+ * payload, which the function receives as input. This payload contains a
+ * validationData
attribute, which provides the data that you assigned to
+ * the ClientMetadata parameter in your AdminInitiateAuth request. In your function code in
+ * AWS Lambda, you can process the validationData
value to enhance your
+ * workflow for your specific needs.
When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the + * functions for the following triggers, but it does not provide the ClientMetadata value + * as input:
+ * + *Post authentication
+ *Custom message
+ *Pre token generation
+ *Create auth challenge
+ *Define auth challenge
+ *Verify auth challenge
+ *For more information, see Customizing User Pool Workflows with Lambda Triggers in the + * Amazon Cognito Developer Guide.
+ *Take the following limitations into consideration when you use the ClientMetadata + * parameter:
+ *Amazon Cognito does not store the ClientMetadata value. This data is + * available only to AWS Lambda triggers that are assigned to a user pool to + * support custom workflows. If your user pool configuration does not include + * triggers, the ClientMetadata parameter serves no purpose.
+ *Amazon Cognito does not validate the ClientMetadata value.
+ *Amazon Cognito does not encrypt the the ClientMetadata value, so don't use + * it to provide sensitive information.
+ *The analytics metadata for collecting Amazon Pinpoint metrics for
+ * AdminInitiateAuth
calls.
Contextual data such as the user's device fingerprint, IP address, or location used + * for evaluating the risk of an unexpected event by Amazon Cognito advanced + * security.
+ */ + ContextData?: ContextDataType; } export namespace AdminInitiateAuthRequest { export const filterSensitiveLog = (obj: AdminInitiateAuthRequest): any => ({ ...obj, - ...(obj.AuthParameters && { AuthParameters: SENSITIVE_STRING }), ...(obj.ClientId && { ClientId: SENSITIVE_STRING }), + ...(obj.AuthParameters && { AuthParameters: SENSITIVE_STRING }), }); } @@ -1964,15 +1964,20 @@ export namespace NewDeviceMetadataType { *The authentication result.
*/ export interface AuthenticationResultType { + /** + *The access token.
+ */ + AccessToken?: string; + /** *The expiration period of the authentication result in seconds.
*/ ExpiresIn?: number; /** - *The new device metadata from an authentication result.
+ *The token type.
*/ - NewDeviceMetadata?: NewDeviceMetadataType; + TokenType?: string; /** *The refresh token.
@@ -1985,22 +1990,17 @@ export interface AuthenticationResultType { IdToken?: string; /** - *The token type.
- */ - TokenType?: string; - - /** - *The access token.
+ *The new device metadata from an authentication result.
*/ - AccessToken?: string; + NewDeviceMetadata?: NewDeviceMetadataType; } export namespace AuthenticationResultType { export const filterSensitiveLog = (obj: AuthenticationResultType): any => ({ ...obj, + ...(obj.AccessToken && { AccessToken: SENSITIVE_STRING }), ...(obj.RefreshToken && { RefreshToken: SENSITIVE_STRING }), ...(obj.IdToken && { IdToken: SENSITIVE_STRING }), - ...(obj.AccessToken && { AccessToken: SENSITIVE_STRING }), }); } @@ -2084,14 +2084,6 @@ export interface AdminInitiateAuthResponse { */ ChallengeName?: ChallengeNameType | string; - /** - *The result of the authentication response. This is only returned if the caller does
- * not need to pass another challenge. If the caller does need to pass another challenge
- * before it gets tokens, ChallengeName
, ChallengeParameters
, and
- * Session
are returned.
The session which should be passed both ways in challenge-response calls to the
* service. If AdminInitiateAuth
or AdminRespondToAuthChallenge
@@ -2115,6 +2107,14 @@ export interface AdminInitiateAuthResponse {
* USERNAME
attribute cannot be an alias.
The result of the authentication response. This is only returned if the caller does
+ * not need to pass another challenge. If the caller does need to pass another challenge
+ * before it gets tokens, ChallengeName
, ChallengeParameters
, and
+ * Session
are returned.
The user pool ID for the user pool.
+ */ + UserPoolId: string | undefined; + + /** + *The existing user in the user pool to be linked to the external identity provider user + * account. Can be a native (Username + Password) Cognito User Pools user or a federated + * user (for example, a SAML or Facebook user). If the user doesn't exist, an exception is + * thrown. This is the user that is returned when the new user (with the linked identity + * provider attribute) signs in.
+ *For a native username + password user, the ProviderAttributeValue
for the
+ * DestinationUser
should be the username in the user pool. For a
+ * federated user, it should be the provider-specific user_id
.
The ProviderAttributeName
of the DestinationUser
is
+ * ignored.
The ProviderName
should be set to Cognito
for users in
+ * Cognito user pools.
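Illustrative only (not part of the diff): a sketch of the two ProviderUserIdentifierType values described above, linking a Facebook identity to an existing native user. The Cognito_Subject attribute name for the source user is an assumption drawn from the social-federation documentation, not from this diff, and the IDs are placeholders:

import {
  AdminLinkProviderForUserCommand,
  CognitoIdentityProviderClient,
} from "@aws-sdk/client-cognito-identity-provider";

const client = new CognitoIdentityProviderClient({ region: "us-east-1" });

export async function linkFacebookIdentity(nativeUsername: string, facebookUserId: string) {
  return client.send(
    new AdminLinkProviderForUserCommand({
      UserPoolId: "us-east-1_EXAMPLE", // placeholder
      // Existing native user: ProviderName is "Cognito" and the value is the username.
      DestinationUser: { ProviderName: "Cognito", ProviderAttributeValue: nativeUsername },
      // Federated identity that gets linked when that user next signs in.
      SourceUser: {
        ProviderName: "Facebook",
        ProviderAttributeName: "Cognito_Subject", // assumed attribute name
        ProviderAttributeValue: facebookUserId,
      },
    })
  );
}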
An external identity provider account for a user who does not currently exist yet in * the user pool. This user must be a federated user (for example, a SAML or Facebook @@ -2206,27 +2227,6 @@ export interface AdminLinkProviderForUserRequest { * token.
*/ SourceUser: ProviderUserIdentifierType | undefined; - - /** - *The existing user in the user pool to be linked to the external identity provider user - * account. Can be a native (Username + Password) Cognito User Pools user or a federated - * user (for example, a SAML or Facebook user). If the user doesn't exist, an exception is - * thrown. This is the user that is returned when the new user (with the linked identity - * provider attribute) signs in.
- *For a native username + password user, the ProviderAttributeValue
for the
- * DestinationUser
should be the username in the user pool. For a
- * federated user, it should be the provider-specific user_id
.
The ProviderAttributeName
of the DestinationUser
is
- * ignored.
The ProviderName
should be set to Cognito
for users in
- * Cognito user pools.
The user pool ID for the user pool.
- */ - UserPoolId: string | undefined; } export namespace AdminLinkProviderForUserRequest { @@ -2248,24 +2248,24 @@ export namespace AdminLinkProviderForUserResponse { */ export interface AdminListDevicesRequest { /** - *The pagination token.
+ *The user pool ID.
*/ - PaginationToken?: string; + UserPoolId: string | undefined; /** - *The limit of the devices request.
+ *The user name.
*/ - Limit?: number; + Username: string | undefined; /** - *The user name.
+ *The limit of the devices request.
*/ - Username: string | undefined; + Limit?: number; /** - *The user pool ID.
+ *The pagination token.
*/ - UserPoolId: string | undefined; + PaginationToken?: string; } export namespace AdminListDevicesRequest { @@ -2280,14 +2280,14 @@ export namespace AdminListDevicesRequest { */ export interface AdminListDevicesResponse { /** - *The pagination token.
+ *The devices in the list of devices response.
*/ - PaginationToken?: string; + Devices?: DeviceType[]; /** - *The devices in the list of devices response.
+ *The pagination token.
*/ - Devices?: DeviceType[]; + PaginationToken?: string; } export namespace AdminListDevicesResponse { @@ -2298,9 +2298,9 @@ export namespace AdminListDevicesResponse { export interface AdminListGroupsForUserRequest { /** - *The limit of the request to list groups.
+ *The username for the user.
*/ - Limit?: number; + Username: string | undefined; /** *The user pool ID for the user pool.
@@ -2308,15 +2308,15 @@ export interface AdminListGroupsForUserRequest { UserPoolId: string | undefined; /** - *An identifier that was returned from the previous call to this operation, which can be - * used to return the next set of items in the list.
+ *The limit of the request to list groups.
*/ - NextToken?: string; + Limit?: number; /** - *The username for the user.
+ *An identifier that was returned from the previous call to this operation, which can be + * used to return the next set of items in the list.
*/ - Username: string | undefined; + NextToken?: string; } export namespace AdminListGroupsForUserRequest { @@ -2330,30 +2330,20 @@ export namespace AdminListGroupsForUserRequest { *The group type.
*/ export interface GroupType { - /** - *The user pool ID for the user pool.
- */ - UserPoolId?: string; - - /** - *A string containing the description of the group.
- */ - Description?: string; - /** *The name of the group.
*/ GroupName?: string; /** - *The date the group was last modified.
+ *The user pool ID for the user pool.
*/ - LastModifiedDate?: Date; + UserPoolId?: string; /** - *The date the group was created.
+ *A string containing the description of the group.
*/ - CreationDate?: Date; + Description?: string; /** *The role ARN for the group.
@@ -2377,6 +2367,16 @@ export interface GroupType { *The default Precedence
value is null.
The date the group was last modified.
+ */ + LastModifiedDate?: Date; + + /** + *The date the group was created.
+ */ + CreationDate?: Date; } export namespace GroupType { @@ -2406,9 +2406,9 @@ export namespace AdminListGroupsForUserResponse { export interface AdminListUserAuthEventsRequest { /** - *A pagination token.
+ *The user pool ID.
*/ - NextToken?: string; + UserPoolId: string | undefined; /** *The user pool username or an alias.
@@ -2421,9 +2421,9 @@ export interface AdminListUserAuthEventsRequest { MaxResults?: number; /** - *The user pool ID.
+ *A pagination token.
*/ - UserPoolId: string | undefined; + NextToken?: string; } export namespace AdminListUserAuthEventsRequest { @@ -2448,14 +2448,14 @@ export enum ChallengeResponse { */ export interface ChallengeResponseType { /** - *The challenge response.
+ *The challenge name
*/ - ChallengeResponse?: ChallengeResponse | string; + ChallengeName?: ChallengeName | string; /** - *The challenge name
+ *The challenge response.
*/ - ChallengeName?: ChallengeName | string; + ChallengeResponse?: ChallengeResponse | string; } export namespace ChallengeResponseType { @@ -2469,29 +2469,29 @@ export namespace ChallengeResponseType { */ export interface EventContextDataType { /** - *The user's time zone.
+ *The user's IP address.
*/ - Timezone?: string; + IpAddress?: string; /** - *The user's city.
+ *The user's device name.
*/ - City?: string; + DeviceName?: string; /** - *The user's country.
+ *The user's time zone.
*/ - Country?: string; + Timezone?: string; /** - *The user's IP address.
+ *The user's city.
*/ - IpAddress?: string; + City?: string; /** - *The user's device name.
+ *The user's country.
*/ - DeviceName?: string; + Country?: string; } export namespace EventContextDataType { @@ -2509,11 +2509,6 @@ export enum FeedbackValueType { *Specifies the event feedback type.
*/ export interface EventFeedbackType { - /** - *The event feedback date.
- */ - FeedbackDate?: Date; - /** *The event feedback value.
*/ @@ -2523,6 +2518,11 @@ export interface EventFeedbackType { *The provider.
*/ Provider: string | undefined; + + /** + *The event feedback date.
+ */ + FeedbackDate?: Date; } export namespace EventFeedbackType { @@ -2553,10 +2553,9 @@ export enum RiskLevelType { */ export interface EventRiskType { /** - *Indicates whether compromised credentials were detected during an authentication - * event.
+ *The risk decision.
*/ - CompromisedCredentialsDetected?: boolean; + RiskDecision?: RiskDecisionType | string; /** *The risk level.
@@ -2564,9 +2563,10 @@ export interface EventRiskType { RiskLevel?: RiskLevelType | string; /** - *The risk decision.
+ *Indicates whether compromised credentials were detected during an authentication + * event.
*/ - RiskDecision?: RiskDecisionType | string; + CompromisedCredentialsDetected?: boolean; } export namespace EventRiskType { @@ -2586,46 +2586,46 @@ export enum EventType { */ export interface AuthEventType { /** - *The user context data captured at the time of an event request. It provides additional - * information about the client from which event the request is received.
+ *The event ID.
*/ - EventContextData?: EventContextDataType; + EventId?: string; /** - *A flag specifying the user feedback captured at the time of an event request is good - * or bad.
+ *The event type.
*/ - EventFeedback?: EventFeedbackType; + EventType?: EventType | string; /** - *The event type.
+ *The creation date
*/ - EventType?: EventType | string; + CreationDate?: Date; /** - *The event ID.
+ *The event response.
*/ - EventId?: string; + EventResponse?: EventResponseType | string; /** - *The challenge responses.
+ *The event risk.
*/ - ChallengeResponses?: ChallengeResponseType[]; + EventRisk?: EventRiskType; /** - *The creation date
+ *The challenge responses.
*/ - CreationDate?: Date; + ChallengeResponses?: ChallengeResponseType[]; /** - *The event risk.
+ *The user context data captured at the time of an event request. It provides additional + * information about the client from which event the request is received.
*/ - EventRisk?: EventRiskType; + EventContextData?: EventContextDataType; /** - *The event response.
+ *A flag specifying the user feedback captured at the time of an event request is good + * or bad.
*/ - EventResponse?: EventResponseType | string; + EventFeedback?: EventFeedbackType; } export namespace AuthEventType { @@ -2670,6 +2670,11 @@ export namespace UserPoolAddOnNotEnabledException { } export interface AdminRemoveUserFromGroupRequest { + /** + *The user pool ID for the user pool.
+ */ + UserPoolId: string | undefined; + /** *The username for the user.
*/ @@ -2679,11 +2684,6 @@ export interface AdminRemoveUserFromGroupRequest { *The group name.
*/ GroupName: string | undefined; - - /** - *The user pool ID for the user pool.
- */ - UserPoolId: string | undefined; } export namespace AdminRemoveUserFromGroupRequest { @@ -2697,6 +2697,16 @@ export namespace AdminRemoveUserFromGroupRequest { *Represents the request to reset a user's password as an administrator.
*/ export interface AdminResetUserPasswordRequest { + /** + *The user pool ID for the user pool where you want to reset the user's password.
+ */ + UserPoolId: string | undefined; + + /** + *The user name of the user whose password you wish to reset.
+ */ + Username: string | undefined; + /** *A map of custom key-value pairs that you can provide as input for any custom workflows * that this action triggers.
@@ -2732,16 +2742,6 @@ export interface AdminResetUserPasswordRequest { * */ ClientMetadata?: { [key: string]: string }; - - /** - *The user pool ID for the user pool where you want to reset the user's password.
- */ - UserPoolId: string | undefined; - - /** - *The user name of the user whose password you wish to reset.
- */ - Username: string | undefined; } export namespace AdminResetUserPasswordRequest { @@ -2788,72 +2788,20 @@ export namespace InvalidEmailRoleAccessPolicyException { */ export interface AdminRespondToAuthChallengeRequest { /** - *The session which should be passed both ways in challenge-response calls to the
- * service. If InitiateAuth
or RespondToAuthChallenge
API call
- * determines that the caller needs to go through another challenge, they return a session
- * with other challenge parameters. This session should be passed as it is to the next
- * RespondToAuthChallenge
API call.
The ID of the Amazon Cognito user pool.
*/ - Session?: string; + UserPoolId: string | undefined; /** - *A map of custom key-value pairs that you can provide as input for any custom workflows - * that this action triggers.
- *You create custom workflows by assigning AWS Lambda functions to user pool triggers.
- * When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any
- * functions that are assigned to the following triggers: pre sign-up,
- * custom message, post authentication,
- * user migration, pre token generation,
- * define auth challenge, create auth
- * challenge, and verify auth challenge response. When
- * Amazon Cognito invokes any of these functions, it passes a JSON payload, which the
- * function receives as input. This payload contains a clientMetadata
- * attribute, which provides the data that you assigned to the ClientMetadata parameter in
- * your AdminRespondToAuthChallenge request. In your function code in AWS Lambda, you can
- * process the clientMetadata
value to enhance your workflow for your specific
- * needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the - * Amazon Cognito Developer Guide.
- *Take the following limitations into consideration when you use the ClientMetadata - * parameter:
- *Amazon Cognito does not store the ClientMetadata value. This data is - * available only to AWS Lambda triggers that are assigned to a user pool to - * support custom workflows. If your user pool configuration does not include - * triggers, the ClientMetadata parameter serves no purpose.
- *Amazon Cognito does not validate the ClientMetadata value.
- *Amazon Cognito does not encrypt the the ClientMetadata value, so don't use - * it to provide sensitive information.
- *The app client ID.
*/ - ClientMetadata?: { [key: string]: string }; + ClientId: string | undefined; /** *The challenge name. For more information, see AdminInitiateAuth.
*/ ChallengeName: ChallengeNameType | string | undefined; - /** - *The analytics metadata for collecting Amazon Pinpoint metrics for
- * AdminRespondToAuthChallenge
calls.
Contextual data such as the user's device fingerprint, IP address, or location used - * for evaluating the risk of an unexpected event by Amazon Cognito advanced - * security.
- */ - ContextData?: ContextDataType; - /** *The challenge responses. These are inputs corresponding to the value of
* ChallengeName
, for example:
The app client ID.
+ *The session which should be passed both ways in challenge-response calls to the
+ * service. If InitiateAuth
or RespondToAuthChallenge
API call
+ * determines that the caller needs to go through another challenge, they return a session
+ * with other challenge parameters. This session should be passed as it is to the next
+ * RespondToAuthChallenge
API call.
The ID of the Amazon Cognito user pool.
+ *The analytics metadata for collecting Amazon Pinpoint metrics for
+ * AdminRespondToAuthChallenge
calls.
Contextual data such as the user's device fingerprint, IP address, or location used + * for evaluating the risk of an unexpected event by Amazon Cognito advanced + * security.
+ */ + ContextData?: ContextDataType; + + /** + *A map of custom key-value pairs that you can provide as input for any custom workflows + * that this action triggers.
+ *You create custom workflows by assigning AWS Lambda functions to user pool triggers.
+ * When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any
+ * functions that are assigned to the following triggers: pre sign-up,
+ * custom message, post authentication,
+ * user migration, pre token generation,
+ * define auth challenge, create auth
+ * challenge, and verify auth challenge response. When
+ * Amazon Cognito invokes any of these functions, it passes a JSON payload, which the
+ * function receives as input. This payload contains a clientMetadata
+ * attribute, which provides the data that you assigned to the ClientMetadata parameter in
+ * your AdminRespondToAuthChallenge request. In your function code in AWS Lambda, you can
+ * process the clientMetadata
value to enhance your workflow for your specific
+ * needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the + * Amazon Cognito Developer Guide.
+ *Take the following limitations into consideration when you use the ClientMetadata + * parameter:
+ *Amazon Cognito does not store the ClientMetadata value. This data is + * available only to AWS Lambda triggers that are assigned to a user pool to + * support custom workflows. If your user pool configuration does not include + * triggers, the ClientMetadata parameter serves no purpose.
+ *Amazon Cognito does not validate the ClientMetadata value.
+ *Amazon Cognito does not encrypt the the ClientMetadata value, so don't use + * it to provide sensitive information.
+ *Responds to the authentication challenge, as an administrator.
*/ export interface AdminRespondToAuthChallengeResponse { + /** + *The name of the challenge. For more information, see AdminInitiateAuth.
+ */ + ChallengeName?: ChallengeNameType | string; + /** *The session which should be passed both ways in challenge-response calls to the * service. If the caller needs to @@ -2928,11 +2933,6 @@ export interface AdminRespondToAuthChallengeResponse { */ ChallengeParameters?: { [key: string]: string }; - /** - *
The name of the challenge. For more information, see AdminInitiateAuth.
- */ - ChallengeName?: ChallengeNameType | string; - /** *The result returned by the server in response to the authentication request.
*/ @@ -3002,11 +3002,15 @@ export namespace SoftwareTokenMFANotFoundException { } /** - *The type used for enabling SMS MFA at the user level.
+ *The type used for enabling SMS MFA at the user level. Phone numbers don't need to be verified + * to be used for SMS MFA. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, + * unless device tracking is turned on and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, + * disable MFA for users and turn on Adaptive Authentication for the user pool.
*/ export interface SMSMfaSettingsType { /** - *Specifies whether SMS text message MFA is enabled.
+ *Specifies whether SMS text message MFA is enabled. If an MFA type is enabled for a user, + * the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on and the device has been trusted.
*/ Enabled?: boolean; @@ -3023,18 +3027,21 @@ export namespace SMSMfaSettingsType { } /** - *The type used for enabling software token MFA at the user level.
+ *The type used for enabling software token MFA at the user level. If an MFA type is enabled for a user, the user will be prompted for MFA during all sign in attempts, unless device tracking is turned on + * and the device has been trusted. If you would like MFA to be applied selectively based on the assessed risk level of sign in attempts, + * disable MFA for users and turn on Adaptive Authentication for the user pool.
*/ export interface SoftwareTokenMfaSettingsType { /** - *Specifies whether software token MFA is the preferred MFA method.
+ *Specifies whether software token MFA is enabled. If an MFA type is enabled for a user, the user will be prompted for MFA during + * all sign in attempts, unless device tracking is turned on and the device has been trusted.
*/ - PreferredMfa?: boolean; + Enabled?: boolean; /** - *Specifies whether software token MFA is enabled.
+ *Specifies whether software token MFA is the preferred MFA method.
*/ - Enabled?: boolean; + PreferredMfa?: boolean; } export namespace SoftwareTokenMfaSettingsType { @@ -3044,6 +3051,11 @@ export namespace SoftwareTokenMfaSettingsType { } export interface AdminSetUserMFAPreferenceRequest { + /** + *The SMS text message MFA settings.
+ */ + SMSMfaSettings?: SMSMfaSettingsType; + /** *The time-based one-time password software token MFA settings.
*/ @@ -3058,12 +3070,7 @@ export interface AdminSetUserMFAPreferenceRequest { *The user pool ID.
*/ UserPoolId: string | undefined; - - /** - *The SMS text message MFA settings.
- */ - SMSMfaSettings?: SMSMfaSettingsType; -} +} export namespace AdminSetUserMFAPreferenceRequest { export const filterSensitiveLog = (obj: AdminSetUserMFAPreferenceRequest): any => ({ @@ -3081,11 +3088,6 @@ export namespace AdminSetUserMFAPreferenceResponse { } export interface AdminSetUserPasswordRequest { - /** - *The password for the user.
- */ - Password: string | undefined; - /** *The user pool ID for the user pool where you want to set the user's password.
*/ @@ -3096,6 +3098,11 @@ export interface AdminSetUserPasswordRequest { */ Username: string | undefined; + /** + *The password for the user.
+ */ + Password: string | undefined; + /** *
* True
if the password is permanent, False
if it is
@@ -3107,8 +3114,8 @@ export interface AdminSetUserPasswordRequest {
export namespace AdminSetUserPasswordRequest {
export const filterSensitiveLog = (obj: AdminSetUserPasswordRequest): any => ({
...obj,
- ...(obj.Password && { Password: SENSITIVE_STRING }),
...(obj.Username && { Username: SENSITIVE_STRING }),
+ ...(obj.Password && { Password: SENSITIVE_STRING }),
});
}
@@ -3125,6 +3132,12 @@ export namespace AdminSetUserPasswordResponse {
* medium.
The ID of the user pool that contains the user that you are setting options + * for.
+ */ + UserPoolId: string | undefined; + /** *The user name of the user that you are setting options for.
*/ @@ -3135,12 +3148,6 @@ export interface AdminSetUserSettingsRequest { * delivery. */ MFAOptions: MFAOptionType[] | undefined; - - /** - *The ID of the user pool that contains the user that you are setting options - * for.
- */ - UserPoolId: string | undefined; } export namespace AdminSetUserSettingsRequest { @@ -3174,14 +3181,14 @@ export interface AdminUpdateAuthEventFeedbackRequest { Username: string | undefined; /** - *The authentication event feedback value.
+ *The authentication event ID.
*/ - FeedbackValue: FeedbackValueType | string | undefined; + EventId: string | undefined; /** - *The authentication event ID.
+ *The authentication event feedback value.
*/ - EventId: string | undefined; + FeedbackValue: FeedbackValueType | string | undefined; } export namespace AdminUpdateAuthEventFeedbackRequest { @@ -3209,24 +3216,24 @@ export enum DeviceRememberedStatusType { */ export interface AdminUpdateDeviceStatusRequest { /** - *The status indicating whether a device has been remembered or not.
+ *The user pool ID.
*/ - DeviceRememberedStatus?: DeviceRememberedStatusType | string; + UserPoolId: string | undefined; /** - *The device key.
+ *The user name.
*/ - DeviceKey: string | undefined; + Username: string | undefined; /** - *The user name.
+ *The device key.
*/ - Username: string | undefined; + DeviceKey: string | undefined; /** - *The user pool ID.
+ *The status indicating whether a device has been remembered or not.
*/ - UserPoolId: string | undefined; + DeviceRememberedStatus?: DeviceRememberedStatusType | string; } export namespace AdminUpdateDeviceStatusRequest { @@ -3252,14 +3259,14 @@ export namespace AdminUpdateDeviceStatusResponse { */ export interface AdminUpdateUserAttributesRequest { /** - *The user name of the user for whom you want to update user attributes.
+ *The user pool ID for the user pool where you want to update user attributes.
*/ - Username: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID for the user pool where you want to update user attributes.
+ *The user name of the user for whom you want to update user attributes.
*/ - UserPoolId: string | undefined; + Username: string | undefined; /** *An array of name-value pairs representing user attributes.
@@ -3383,6 +3390,11 @@ export enum AliasAttributeType { * */ export interface AnalyticsConfigurationType { + /** + *The application ID for an Amazon Pinpoint application.
+ */ + ApplicationId?: string; + /** *The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint * project for Pinpoint integration with the chosen User Pool Client. @@ -3390,11 +3402,6 @@ export interface AnalyticsConfigurationType { */ ApplicationArn?: string; - /** - *
The external ID.
- */ - ExternalId?: string; - /** *The ARN of an IAM role that authorizes Amazon Cognito to publish events to Amazon * Pinpoint analytics.
@@ -3402,15 +3409,15 @@ export interface AnalyticsConfigurationType { RoleArn?: string; /** - *If UserDataShared
is true
, Amazon Cognito will include user
- * data in the events it publishes to Amazon Pinpoint analytics.
The external ID.
*/ - UserDataShared?: boolean; + ExternalId?: string; /** - *The application ID for an Amazon Pinpoint application.
+ *If UserDataShared
is true
, Amazon Cognito will include user
+ * data in the events it publishes to Amazon Pinpoint analytics.
The session which should be passed both ways in challenge-response calls to the - * service. This allows authentication of the user as part of the MFA setup process.
- */ - Session?: string; - /** *A unique generated shared secret code that is used in the TOTP algorithm to generate a * one time code.
*/ SecretCode?: string; + + /** + *The session which should be passed both ways in challenge-response calls to the + * service. This allows authentication of the user as part of the MFA setup process.
+ */ + Session?: string; } export namespace AssociateSoftwareTokenResponse { @@ -3483,11 +3490,6 @@ export namespace ConcurrentModificationException { *Represents the request to change a user password.
*/ export interface ChangePasswordRequest { - /** - *The access token.
- */ - AccessToken: string | undefined; - /** *The old password.
*/ @@ -3497,14 +3499,19 @@ export interface ChangePasswordRequest { *The new password.
*/ ProposedPassword: string | undefined; + + /** + *The access token.
+ */ + AccessToken: string | undefined; } export namespace ChangePasswordRequest { export const filterSensitiveLog = (obj: ChangePasswordRequest): any => ({ ...obj, - ...(obj.AccessToken && { AccessToken: SENSITIVE_STRING }), ...(obj.PreviousPassword && { PreviousPassword: SENSITIVE_STRING }), ...(obj.ProposedPassword && { ProposedPassword: SENSITIVE_STRING }), + ...(obj.AccessToken && { AccessToken: SENSITIVE_STRING }), }); } @@ -3524,14 +3531,14 @@ export namespace ChangePasswordResponse { */ export interface DeviceSecretVerifierConfigType { /** - *The salt.
+ *The password verifier.
*/ - Salt?: string; + PasswordVerifier?: string; /** - *The password verifier.
+ *The salt.
*/ - PasswordVerifier?: string; + Salt?: string; } export namespace DeviceSecretVerifierConfigType { @@ -3555,14 +3562,14 @@ export interface ConfirmDeviceRequest { DeviceKey: string | undefined; /** - *The device name.
+ *The configuration of the device secret verifier.
*/ - DeviceName?: string; + DeviceSecretVerifierConfig?: DeviceSecretVerifierConfigType; /** - *The configuration of the device secret verifier.
+ *The device name.
*/ - DeviceSecretVerifierConfig?: DeviceSecretVerifierConfigType; + DeviceName?: string; } export namespace ConfirmDeviceRequest { @@ -3613,6 +3620,17 @@ export namespace UserContextDataType { *The request representing the confirmation for a password reset.
*/ export interface ConfirmForgotPasswordRequest { + /** + *The app client ID of the app associated with the user pool.
+ */ + ClientId: string | undefined; + + /** + *A keyed-hash message authentication code (HMAC) calculated using the secret key of a + * user pool client and username plus the client ID in the message.
+ */ + SecretHash?: string; + /** *The user name of the user for whom you want to enter a code to retrieve a forgotten * password.
@@ -3620,22 +3638,28 @@ export interface ConfirmForgotPasswordRequest { Username: string | undefined; /** - *Contextual data such as the user's device fingerprint, IP address, or location used - * for evaluating the risk of an unexpected event by Amazon Cognito advanced - * security.
+ *The confirmation code sent by a user's request to retrieve a forgotten password. For + * more information, see ForgotPassword.
*/ - UserContextData?: UserContextDataType; + ConfirmationCode: string | undefined; /** - *The app client ID of the app associated with the user pool.
+ *The password sent by a user's request to retrieve a forgotten password.
*/ - ClientId: string | undefined; + Password: string | undefined; /** - *A keyed-hash message authentication code (HMAC) calculated using the secret key of a - * user pool client and username plus the client ID in the message.
+ *The Amazon Pinpoint analytics metadata for collecting metrics for
+ * ConfirmForgotPassword
calls.
Contextual data such as the user's device fingerprint, IP address, or location used + * for evaluating the risk of an unexpected event by Amazon Cognito advanced + * security.
+ */ + UserContextData?: UserContextDataType; /** *A map of custom key-value pairs that you can provide as input for any custom workflows @@ -3672,31 +3696,14 @@ export interface ConfirmForgotPasswordRequest { * */ ClientMetadata?: { [key: string]: string }; - - /** - *
The confirmation code sent by a user's request to retrieve a forgotten password. For - * more information, see ForgotPassword.
- */ - ConfirmationCode: string | undefined; - - /** - *The password sent by a user's request to retrieve a forgotten password.
- */ - Password: string | undefined; - - /** - *The Amazon Pinpoint analytics metadata for collecting metrics for
- * ConfirmForgotPassword
calls.
Represents the request to confirm registration of a user.
*/ export interface ConfirmSignUpRequest { - /** - *The user name of the user whose registration you wish to confirm.
- */ - Username: string | undefined; - /** *The ID of the app client associated with the user pool.
*/ @@ -3734,23 +3736,38 @@ export interface ConfirmSignUpRequest { SecretHash?: string; /** - *Contextual data such as the user's device fingerprint, IP address, or location used - * for evaluating the risk of an unexpected event by Amazon Cognito advanced - * security.
+ *The user name of the user whose registration you wish to confirm.
*/ - UserContextData?: UserContextDataType; + Username: string | undefined; /** *The confirmation code sent by a user's request to confirm registration.
*/ ConfirmationCode: string | undefined; + /** + *Boolean to be specified to force user confirmation irrespective of existing alias. By
+ * default set to False
. If this parameter is set to True
and the
+ * phone number/email used for sign up confirmation already exists as an alias with a
+ * different user, the API call will migrate the alias from the previous user to the newly
+ * created user being confirmed. If set to False
, the API will throw an
+ * AliasExistsException error.
The Amazon Pinpoint analytics metadata for collecting metrics for
* ConfirmSignUp
calls.
Contextual data such as the user's device fingerprint, IP address, or location used + * for evaluating the risk of an unexpected event by Amazon Cognito advanced + * security.
+ */ + UserContextData?: UserContextDataType; + /** *A map of custom key-value pairs that you can provide as input for any custom workflows * that this action triggers.
@@ -3785,24 +3802,14 @@ export interface ConfirmSignUpRequest { * */ ClientMetadata?: { [key: string]: string }; - - /** - *Boolean to be specified to force user confirmation irrespective of existing alias. By
- * default set to False
. If this parameter is set to True
and the
- * phone number/email used for sign up confirmation already exists as an alias with a
- * different user, the API call will migrate the alias from the previous user to the newly
- * created user being confirmed. If set to False
, the API will throw an
- * AliasExistsException error.
The user pool ID for the user pool.
+ */ + UserPoolId: string | undefined; + + /** + *A string containing the description of the group.
+ */ + Description?: string; + + /** + *The role ARN for the group.
+ */ + RoleArn?: string; + /** *A nonnegative integer value that specifies the precedence of this group relative to * the other groups that a user can belong to in the user pool. Zero is the highest @@ -3840,21 +3862,6 @@ export interface CreateGroupRequest { *
The default Precedence
value is null.
The user pool ID for the user pool.
- */ - UserPoolId: string | undefined; - - /** - *The role ARN for the group.
- */ - RoleArn?: string; - - /** - *A string containing the description of the group.
- */ - Description?: string; } export namespace CreateGroupRequest { @@ -3903,25 +3910,19 @@ export enum IdentityProviderTypeType { export interface CreateIdentityProviderRequest { /** - *The identity provider name.
+ *The user pool ID.
*/ - ProviderName: string | undefined; + UserPoolId: string | undefined; /** - *A mapping of identity provider attributes to standard and custom user pool - * attributes.
+ *The identity provider name.
*/ - AttributeMapping?: { [key: string]: string }; + ProviderName: string | undefined; /** - *A list of identity provider identifiers.
+ *The identity provider type.
*/ - IdpIdentifiers?: string[]; - - /** - *The user pool ID.
- */ - UserPoolId: string | undefined; + ProviderType: IdentityProviderTypeType | string | undefined; /** *The identity provider details. The following list describes the provider detail keys @@ -4035,9 +4036,15 @@ export interface CreateIdentityProviderRequest { ProviderDetails: { [key: string]: string } | undefined; /** - *
The identity provider type.
+ *A mapping of identity provider attributes to standard and custom user pool + * attributes.
*/ - ProviderType: IdentityProviderTypeType | string | undefined; + AttributeMapping?: { [key: string]: string }; + + /** + *A list of identity provider identifiers.
+ */ + IdpIdentifiers?: string[]; } export namespace CreateIdentityProviderRequest { @@ -4060,27 +4067,6 @@ export interface IdentityProviderType { */ ProviderName?: string; - /** - *The date the identity provider was last modified.
- */ - LastModifiedDate?: Date; - - /** - *The date the identity provider was created.
- */ - CreationDate?: Date; - - /** - *A mapping of identity provider attributes to standard and custom user pool - * attributes.
- */ - AttributeMapping?: { [key: string]: string }; - - /** - *A list of identity provider identifiers.
- */ - IdpIdentifiers?: string[]; - /** *The identity provider type.
*/ @@ -4199,6 +4185,27 @@ export interface IdentityProviderType { * */ ProviderDetails?: { [key: string]: string }; + + /** + *A mapping of identity provider attributes to standard and custom user pool + * attributes.
+ */ + AttributeMapping?: { [key: string]: string }; + + /** + *A list of identity provider identifiers.
+ */ + IdpIdentifiers?: string[]; + + /** + *The date the identity provider was last modified.
+ */ + LastModifiedDate?: Date; + + /** + *The date the identity provider was created.
+ */ + CreationDate?: Date; } export namespace IdentityProviderType { @@ -4258,17 +4265,6 @@ export namespace ResourceServerScopeType { } export interface CreateResourceServerRequest { - /** - *A friendly name for the resource server.
- */ - Name: string | undefined; - - /** - *A list of scopes. Each scope is map, where the keys are name
and
- * description
.
The user pool ID for the user pool.
*/ @@ -4280,6 +4276,17 @@ export interface CreateResourceServerRequest { *https://my-weather-api.example.com
.
*/
Identifier: string | undefined;
+
+ /**
+ * A friendly name for the resource server.
+ */ + Name: string | undefined; + + /** + *A list of scopes. Each scope is map, where the keys are name
and
+ * description
.
The identifier for the resource server.
+ *The user pool ID for the user pool that hosts the resource server.
*/ - Identifier?: string; + UserPoolId?: string; /** - *A list of scopes that are defined for the resource server.
+ *The identifier for the resource server.
*/ - Scopes?: ResourceServerScopeType[]; + Identifier?: string; /** - *The user pool ID for the user pool that hosts the resource server.
+ *The name of the resource server.
*/ - UserPoolId?: string; + Name?: string; /** - *The name of the resource server.
+ *A list of scopes that are defined for the resource server.
*/ - Name?: string; + Scopes?: ResourceServerScopeType[]; } export namespace ResourceServerType { @@ -4336,6 +4343,11 @@ export namespace CreateResourceServerResponse { *Represents the request to create the user import job.
*/ export interface CreateUserImportJobRequest { + /** + *The job name for the user import job.
+ */ + JobName: string | undefined; + /** *The user pool ID for the user pool that the users are being imported into.
*/ @@ -4345,11 +4357,6 @@ export interface CreateUserImportJobRequest { *The role ARN for the Amazon CloudWatch Logging role for the user import job.
*/ CloudWatchLogsRoleArn: string | undefined; - - /** - *The job name for the user import job.
- */ - JobName: string | undefined; } export namespace CreateUserImportJobRequest { @@ -4374,67 +4381,40 @@ export enum UserImportJobStatusType { */ export interface UserImportJobType { /** - *The date when the user import job was started.
+ *The job name for the user import job.
*/ - StartDate?: Date; + JobName?: string; /** *The job ID for the user import job.
*/ JobId?: string; - /** - *The number of users that could not be imported.
- */ - FailedUsers?: number; - - /** - *The date the user import job was created.
- */ - CreationDate?: Date; - /** *The user pool ID for the user pool that the users are being imported into.
*/ UserPoolId?: string; - /** - *The number of users that were skipped.
- */ - SkippedUsers?: number; - - /** - *The role ARN for the Amazon CloudWatch Logging role for the user import job. For more - * information, see "Creating the CloudWatch Logs IAM Role" in the Amazon Cognito Developer - * Guide.
- */ - CloudWatchLogsRoleArn?: string; - /** *The pre-signed URL to be used to upload the .csv
file.
The number of users that were successfully imported.
+ *The date the user import job was created.
*/ - ImportedUsers?: number; + CreationDate?: Date; /** - *The message returned when the user import job is completed.
+ *The date when the user import job was started.
*/ - CompletionMessage?: string; + StartDate?: Date; /** *The date when the user import job was completed.
*/ CompletionDate?: Date; - /** - *The job name for the user import job.
- */ - JobName?: string; - /** *The status of the user import job. One of the following:
*The role ARN for the Amazon CloudWatch Logging role for the user import job. For more + * information, see "Creating the CloudWatch Logs IAM Role" in the Amazon Cognito Developer + * Guide.
+ */ + CloudWatchLogsRoleArn?: string; + + /** + *The number of users that were successfully imported.
+ */ + ImportedUsers?: number; + + /** + *The number of users that were skipped.
+ */ + SkippedUsers?: number; + + /** + *The number of users that could not be imported.
+ */ + FailedUsers?: number; + + /** + *The message returned when the user import job is completed.
+ */ + CompletionMessage?: string; } export namespace UserImportJobType { @@ -4513,16 +4520,16 @@ export enum VerifiedAttributeType { *The configuration for the user pool's device tracking.
*/ export interface DeviceConfigurationType { - /** - *If true, a device is only remembered on user prompt.
- */ - DeviceOnlyRememberedOnUserPrompt?: boolean; - /** *Indicates whether a challenge is required on a new device. Only applicable to a new * device.
*/ ChallengeRequiredOnNewDevice?: boolean; + + /** + *If true, a device is only remembered on user prompt.
+ */ + DeviceOnlyRememberedOnUserPrompt?: boolean; } export namespace DeviceConfigurationType { @@ -4537,9 +4544,36 @@ export enum EmailSendingAccountType { } /** - *The email configuration type.
+ *The email configuration type.
+ *Amazon Cognito has specific regions for use with Amazon SES. For more information on the supported regions, see + * Email Settings for Amazon Cognito User Pools.
+ *The Amazon Resource Name (ARN) of a verified email address in Amazon SES. This email
+ * address is used in one of the following ways, depending on the value that you specify
+ * for the EmailSendingAccount
parameter:
If you specify COGNITO_DEFAULT
, Amazon Cognito uses this address
+ * as the custom FROM address when it emails your users by using its built-in email
+ * account.
If you specify DEVELOPER
, Amazon Cognito emails your users with
+ * this address by calling Amazon SES on your behalf.
The destination to which the receiver of the email should reply to.
+ */ + ReplyToEmailAddress?: string; + /** *Specifies whether Amazon Cognito emails your users by using its built-in email * functionality or your Amazon SES email configuration. Specify one of the following @@ -4553,11 +4587,46 @@ export interface EmailConfigurationType { * environments, the default email limit is below the required delivery volume. * To achieve a higher delivery volume, specify DEVELOPER to use your Amazon * SES email configuration.
+ * *To look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito * Developer Guide.
*The default FROM address is no-reply@verificationemail.com. To customize
* the FROM address, provide the ARN of an Amazon SES verified email address
* for the SourceArn
parameter.
+ * If EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't allowed:
+ * + *EmailVerificationMessage
+ *EmailVerificationSubject
+ *InviteMessageTemplate.EmailMessage
+ *InviteMessageTemplate.EmailSubject
+ *VerificationMessageTemplate.EmailMessage
+ *VerificationMessageTemplate.EmailMessageByLink
+ *VerificationMessageTemplate.EmailSubject,
+ *VerificationMessageTemplate.EmailSubjectByLink
+ *DEVELOPER EmailSendingAccount is required.
+ *The destination to which the receiver of the email should reply to.
- */ - ReplyToEmailAddress?: string; - /** *Identifies either the sender’s email address or the sender’s name with their email
* address. For example, The Amazon Resource Name (ARN) of a verified email address in Amazon SES. This email
- * address is used in one of the following ways, depending on the value that you specify
- * for the If you specify If you specify The set of configuration rules that can be applied to emails sent using Amazon SES. A
* configuration set is applied to an email by including a reference to the configuration
@@ -4642,39 +4688,71 @@ export namespace EmailConfigurationType {
});
}
+export enum CustomEmailSenderLambdaVersionType {
+ V1_0 = "V1_0",
+}
+
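Illustrative only (not part of the diff): a sketch of the custom-sender wiring these additions enable inside LambdaConfigType. The LambdaArn and KMSKeyID member names and all ARNs are assumptions made for the sketch; the V1_0 enum value comes from this diff:

import { LambdaConfigType } from "@aws-sdk/client-cognito-identity-provider";

const lambdaConfig: LambdaConfigType = {
  // Cognito invokes this function instead of sending email itself; codes and
  // temporary passwords are encrypted with the KMS key referenced below.
  CustomEmailSender: {
    LambdaVersion: "V1_0", // only supported value
    LambdaArn: "arn:aws:lambda:us-east-1:123456789012:function:custom-email-sender", // placeholder
  },
  CustomSMSSender: {
    LambdaVersion: "V1_0",
    LambdaArn: "arn:aws:lambda:us-east-1:123456789012:function:custom-sms-sender", // placeholder
  },
  KMSKeyID: "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555", // placeholder
};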
+/**
+ * A custom email sender Lambda configuration type.
+ */
+export interface CustomEmailLambdaVersionConfigType {
+  /**
+   * The Lambda version represents the signature of the "request" attribute in the "event"
+   * information Amazon Cognito passes to your custom email Lambda function.
+   * The only supported value is V1_0.
+   */
+  LambdaVersion: CustomEmailSenderLambdaVersionType | string | undefined;
+
+  /**
+   * The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to
+   * send email notifications to users.
+   */
+  LambdaArn: string | undefined;
+}
+
+export enum CustomSMSSenderLambdaVersionType {
+  V1_0 = "V1_0",
+}
+
+/**
+ * A custom SMS sender Lambda configuration type.
+ */
+export interface CustomSMSLambdaVersionConfigType {
+  /**
+   * The Lambda version represents the signature of the "request" attribute in the "event"
+   * information Amazon Cognito passes to your custom SMS Lambda function.
+   * The only supported value is V1_0.
+   */
+  LambdaVersion: CustomSMSSenderLambdaVersionType | string | undefined;
+
+  /**
+   * The Lambda Amazon Resource Name of the Lambda function that Amazon Cognito triggers to
+   * send SMS notifications to users.
+   */
+  LambdaArn: string | undefined;
+}
+
 /**
  * Specifies the configuration for AWS Lambda triggers.
  */
 export interface LambdaConfigType {
   /**
    * A pre-registration AWS Lambda trigger.
    */
   PreSignUp?: string;

   /**
    * A custom Message AWS Lambda trigger.
    */
   CustomMessage?: string;

   /**
    * A post-confirmation AWS Lambda trigger.
    */
   PostConfirmation?: string;

   /**
    * A pre-authentication AWS Lambda trigger.
    */
   PreAuthentication?: string;

   /**
    * A post-authentication AWS Lambda trigger.
    */
   PostAuthentication?: string;

   /**
    * Defines the authentication challenge.
    */
   DefineAuthChallenge?: string;

   /**
    * Creates an authentication challenge.
    */
   CreateAuthChallenge?: string;

   /**
    * Verifies the authentication challenge response.
    */
   VerifyAuthChallengeResponse?: string;

   /**
    * A Lambda trigger that is invoked before token generation.
    */
   PreTokenGeneration?: string;

   /**
    * The user migration Lambda config type.
    */
   UserMigration?: string;

+  /**
+   * A custom SMS sender AWS Lambda trigger.
+   */
+  CustomSMSSender?: CustomSMSLambdaVersionConfigType;
+
+  /**
+   * A custom email sender AWS Lambda trigger.
+   */
+  CustomEmailSender?: CustomEmailLambdaVersionConfigType;
+
+  /**
+   * The Amazon Resource Name of Key Management Service Customer master keys.
+   * Amazon Cognito uses the key to encrypt codes and temporary passwords sent to
+   * CustomEmailSender and CustomSMSSender.
+   */
+  KMSKeyID?: string;
 }

@@ -4737,12 +4846,6 @@ export interface PasswordPolicyType {
-  /**
-   * In the password policy you have set, refers to the number of days a temporary password
-   * is valid. If the user does not sign-in during this time, their password will need to be
-   * reset by an administrator.
-   * When you set TemporaryPasswordValidityDays for a user pool, you will
-   * no longer be able to set the deprecated UnusedAccountValidityDays value
-   * for that user pool.
-   */
-  TemporaryPasswordValidityDays?: number;
-
+  /**
+   * The minimum length of the password policy that you have set. Cannot be less than
+   * 6.
+   */
+  MinimumLength?: number;
+
   /**
    * In the password policy that you have set, refers to whether you have required users to
    * use at least one lowercase letter in their password.
    */
   RequireLowercase?: boolean;

-  /**
-   * The minimum length of the password policy that you have set. Cannot be less than
-   * 6.
-   */
-  MinimumLength?: number;
-
   /**
    * In the password policy that you have set, refers to whether you have required users to
    * use at least one number in their password.
    */
   RequireNumbers?: boolean;

+  /**
+   * In the password policy you have set, refers to the number of days a temporary password
+   * is valid. If the user does not sign-in during this time, their password will need to be
+   * reset by an administrator.
+   * When you set TemporaryPasswordValidityDays for a user pool, you will
+   * no longer be able to set the deprecated UnusedAccountValidityDays value
+   * for that user pool.
+   */
+  TemporaryPasswordValidityDays?: number;
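The new CustomEmailSender and CustomSMSSender entries are meant to be used together with KMSKeyID: Amazon Cognito encrypts the code or temporary password with that key before invoking the sender function. A hedged sketch of what such a LambdaConfigType value might look like; all ARNs are placeholders:

```ts
import { LambdaConfigType } from "@aws-sdk/client-cognito-identity-provider";

// Illustrative ARNs only; both sender functions must handle the V1_0 event shape.
const lambdaConfig: LambdaConfigType = {
  CustomEmailSender: {
    LambdaVersion: "V1_0",
    LambdaArn: "arn:aws:lambda:us-east-1:123456789012:function:my-email-sender",
  },
  CustomSMSSender: {
    LambdaVersion: "V1_0",
    LambdaArn: "arn:aws:lambda:us-east-1:123456789012:function:my-sms-sender",
  },
  // Key that Cognito uses to encrypt codes and temporary passwords passed to the senders.
  KMSKeyID: "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555",
};
```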
   /**
    * The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller.
    * This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS
-   * messages.
+   * messages. SMS messages are subject to a spending limit.
    */
   SnsCallerArn: string | undefined;

@@ -4880,36 +4996,42 @@ export enum DefaultEmailOptionType {

 /**
  * The template for verification messages.
  */
 export interface VerificationMessageTemplateType {
-  /**
-   * The email message template for sending a confirmation link to the user.
*/ export interface VerificationMessageTemplateType { - /** - *The email message template for sending a confirmation link to the user.
- */ - EmailMessageByLink?: string; - /** *The SMS message template.
*/ SmsMessage?: string; /** - *The default email option.
+ *The email message template. EmailMessage is allowed only if + * EmailSendingAccount is DEVELOPER. + *
*/ - DefaultEmailOption?: DefaultEmailOptionType | string; + EmailMessage?: string; /** - *The subject line for the email message template.
+ *The subject line for the email message template. EmailSubject is allowed only if EmailSendingAccount + * is DEVELOPER. + *
*/ EmailSubject?: string; + /** + *The email message template for sending a confirmation link to the user. EmailMessageByLink is allowed only if + * EmailSendingAccount is DEVELOPER.
+ */ + EmailMessageByLink?: string; + /** *The subject line for the email message template for sending a confirmation link to the - * user.
+ * user. EmailSubjectByLink is allowed only if + * EmailSendingAccount is DEVELOPER. */ EmailSubjectByLink?: string; /** - *The email message template.
+ *The default email option.
*/ - EmailMessage?: string; + DefaultEmailOption?: DefaultEmailOptionType | string; } export namespace VerificationMessageTemplateType { @@ -4923,35 +5045,46 @@ export namespace VerificationMessageTemplateType { */ export interface CreateUserPoolRequest { /** - *Specifies MFA configuration details.
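For the VerificationMessageTemplateType interface just defined, the ByLink fields are the ones used when DefaultEmailOption is CONFIRM_WITH_LINK (and, per the comments above, only when EmailSendingAccount is DEVELOPER). A small illustrative sketch with placeholder wording:

```ts
import { VerificationMessageTemplateType } from "@aws-sdk/client-cognito-identity-provider";

// Placeholder copy; the {####} marker is replaced with the code and the
// {##...##} marker becomes the clickable confirmation link.
const verificationMessageTemplate: VerificationMessageTemplateType = {
  DefaultEmailOption: "CONFIRM_WITH_LINK",
  EmailSubjectByLink: "Confirm your account",
  EmailMessageByLink: "Please {##click here##} to verify your email address.",
  SmsMessage: "Your verification code is {####}.",
};
```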
+ *A string used to name the user pool.
*/ - MfaConfiguration?: UserPoolMfaType | string; + PoolName: string | undefined; /** - *The configuration for AdminCreateUser
requests.
The policies associated with the new user pool.
*/ - AdminCreateUserConfig?: AdminCreateUserConfigType; + Policies?: UserPoolPolicyType; /** - *The device configuration.
+ *The Lambda trigger configuration information for the new user pool.
+ *In a push model, event sources (such as Amazon S3 and custom applications) need + * permission to invoke a function. So you will need to make an extra call to add + * permission for these event sources to invoke your Lambda function.
+ * + *For more information on using the Lambda API to add permission, see + * AddPermission .
+ *For adding permission using the AWS CLI, see add-permission + * .
+ *A string representing the email verification message.
+ *The attributes to be auto-verified. Possible values: email, phone_number.
*/ - EmailVerificationMessage?: string; + AutoVerifiedAttributes?: (VerifiedAttributeType | string)[]; /** - *The template for the verification message that the user sees when the app requests - * permission to access the user's information.
+ *Attributes supported as an alias for this user pool. Possible values: phone_number, email, or + * preferred_username.
*/ - VerificationMessageTemplate?: VerificationMessageTemplateType; + AliasAttributes?: (AliasAttributeType | string)[]; /** - *A string representing the SMS authentication message.
+ *Specifies whether email addresses or phone numbers can be specified as usernames when + * a user signs up.
*/ - SmsAuthenticationMessage?: string; + UsernameAttributes?: (UsernameAttributeType | string)[]; /** *A string representing the SMS verification message.
@@ -4959,65 +5092,45 @@ export interface CreateUserPoolRequest { SmsVerificationMessage?: string; /** - *The Lambda trigger configuration information for the new user pool.
- *In a push model, event sources (such as Amazon S3 and custom applications) need - * permission to invoke a function. So you will need to make an extra call to add - * permission for these event sources to invoke your Lambda function.
- * - *For more information on using the Lambda API to add permission, see - * AddPermission .
- *For adding permission using the AWS CLI, see add-permission - * .
- *A string representing the email verification message. EmailVerificationMessage is allowed only if EmailSendingAccount is DEVELOPER.
*/ - LambdaConfig?: LambdaConfigType; + EmailVerificationMessage?: string; /** - *A string used to name the user pool.
+ *A string representing the email verification subject. EmailVerificationSubject is allowed only if EmailSendingAccount is DEVELOPER.
*/ - PoolName: string | undefined; + EmailVerificationSubject?: string; /** - *Attributes supported as an alias for this user pool. Possible values: phone_number, email, or - * preferred_username.
+ *The template for the verification message that the user sees when the app requests + * permission to access the user's information.
*/ - AliasAttributes?: (AliasAttributeType | string)[]; + VerificationMessageTemplate?: VerificationMessageTemplateType; /** - *You can choose to set case sensitivity on the username input for the selected sign-in
- * option. For example, when this is set to False
, users will be able to sign
- * in using either "username" or "Username". This configuration is immutable once it has
- * been set. For more information, see UsernameConfigurationType.
A string representing the SMS authentication message.
*/ - UsernameConfiguration?: UsernameConfigurationType; + SmsAuthenticationMessage?: string; /** - *The SMS configuration.
+ *Specifies MFA configuration details.
*/ - SmsConfiguration?: SmsConfigurationType; + MfaConfiguration?: UserPoolMfaType | string; /** - *Used to enable advanced security risk detection. Set the key
- * AdvancedSecurityMode
to the value "AUDIT".
The device configuration.
*/ - UserPoolAddOns?: UserPoolAddOnsType; + DeviceConfiguration?: DeviceConfigurationType; /** - *Specifies whether email addresses or phone numbers can be specified as usernames when - * a user signs up.
+ *The email configuration.
*/ - UsernameAttributes?: (UsernameAttributeType | string)[]; + EmailConfiguration?: EmailConfigurationType; /** - *Use this setting to define which verified available method a user can use to recover
- * their password when they call ForgotPassword
. It allows you to define a
- * preferred method when a user has more than one method available. With this setting, SMS
- * does not qualify for a valid password recovery mechanism if the user also has SMS MFA
- * enabled. In the absence of this setting, Cognito uses the legacy behavior to determine
- * the recovery method where SMS is preferred over email.
The SMS configuration.
*/ - AccountRecoverySetting?: AccountRecoverySettingType; + SmsConfiguration?: SmsConfigurationType; /** *The tag keys and values to assign to the user pool. A tag is a label that you can use @@ -5027,30 +5140,39 @@ export interface CreateUserPoolRequest { UserPoolTags?: { [key: string]: string }; /** - *
An array of schema attributes for the new user pool. These attributes can be standard - * or custom attributes.
+ *The configuration for AdminCreateUser
requests.
The email configuration.
+ *An array of schema attributes for the new user pool. These attributes can be standard + * or custom attributes.
*/ - EmailConfiguration?: EmailConfigurationType; + Schema?: SchemaAttributeType[]; /** - *The attributes to be auto-verified. Possible values: email, phone_number.
+ *Used to enable advanced security risk detection. Set the key
+ * AdvancedSecurityMode
to the value "AUDIT".
A string representing the email verification subject.
+ *You can choose to set case sensitivity on the username input for the selected sign-in
+ * option. For example, when this is set to False
, users will be able to sign
+ * in using either "username" or "Username". This configuration is immutable once it has
+ * been set. For more information, see UsernameConfigurationType.
The policies associated with the new user pool.
+ *Use this setting to define which verified available method a user can use to recover
+ * their password when they call ForgotPassword
. It allows you to define a
+ * preferred method when a user has more than one method available. With this setting, SMS
+ * does not qualify for a valid password recovery mechanism if the user also has SMS MFA
+ * enabled. In the absence of this setting, Cognito uses the legacy behavior to determine
+ * the recovery method where SMS is preferred over email.
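Because AccountRecoverySetting is what makes email win over SMS for ForgotPassword, it may help to see the priority list spelled out. A minimal sketch, assuming the AccountRecoverySettingType/RecoveryMechanisms shape defined elsewhere in this model file:

```ts
import { AccountRecoverySettingType } from "@aws-sdk/client-cognito-identity-provider";

// Prefer a verified email address and fall back to a verified phone number.
const accountRecoverySetting: AccountRecoverySettingType = {
  RecoveryMechanisms: [
    { Priority: 1, Name: "verified_email" },
    { Priority: 2, Name: "verified_phone_number" },
  ],
};
```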
A custom domain name that you provide to Amazon Cognito. This parameter applies only
- * if you use a custom domain to host the sign-up and sign-in pages for your application.
- * For example: auth.example.com
.
For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.
+ *The ID of the user pool.
*/ - CustomDomain?: string; + Id?: string; /** - *The contents of the email verification message.
+ *The name of the user pool.
*/ - EmailVerificationMessage?: string; + Name?: string; /** - *The reason why the SMS configuration cannot send the messages to your users.
+ *The policies associated with the user pool.
*/ - SmsConfigurationFailure?: string; + Policies?: UserPoolPolicyType; /** - *The user pool add-ons.
+ *The AWS Lambda triggers associated with the user pool.
*/ - UserPoolAddOns?: UserPoolAddOnsType; + LambdaConfig?: LambdaConfigType; /** *The status of a user pool.
@@ -5097,47 +5216,35 @@ export interface UserPoolType { Status?: StatusType | string; /** - *The contents of the SMS authentication message.
+ *The date the user pool was last modified.
*/ - SmsAuthenticationMessage?: string; + LastModifiedDate?: Date; /** - *The reason why the email configuration cannot send the messages to your users.
+ *The date the user pool was created.
*/ - EmailConfigurationFailure?: string; + CreationDate?: Date; /** - *Use this setting to define which verified available method a user can use to recover
- * their password when they call ForgotPassword
. It allows you to define a
- * preferred method when a user has more than one method available. With this setting, SMS
- * does not qualify for a valid password recovery mechanism if the user also has SMS MFA
- * enabled. In the absence of this setting, Cognito uses the legacy behavior to determine
- * the recovery method where SMS is preferred over email.
The date the user pool was last modified.
+ *A container with the schema attributes of a user pool.
*/ - LastModifiedDate?: Date; + SchemaAttributes?: SchemaAttributeType[]; /** - *The date the user pool was created.
+ *Specifies the attributes that are auto-verified in a user pool.
*/ - CreationDate?: Date; + AutoVerifiedAttributes?: (VerifiedAttributeType | string)[]; /** - *You can choose to enable case sensitivity on the username input for the selected
- * sign-in option. For example, when this is set to False
, users will be able
- * to sign in using either "username" or "Username". This configuration is immutable once
- * it has been set. For more information, see UsernameConfigurationType.
Specifies the attributes that are aliased in a user pool.
*/ - UsernameConfiguration?: UsernameConfigurationType; + AliasAttributes?: (AliasAttributeType | string)[]; /** - *The name of the user pool.
+ *Specifies whether email addresses or phone numbers can be specified as usernames when + * a user signs up.
*/ - Name?: string; + UsernameAttributes?: (UsernameAttributeType | string)[]; /** *The contents of the SMS verification message.
@@ -5145,9 +5252,9 @@ export interface UserPoolType { SmsVerificationMessage?: string; /** - *Holds the domain prefix if the user pool has a domain associated with it.
+ *The contents of the email verification message.
*/ - Domain?: string; + EmailVerificationMessage?: string; /** *The subject of the email verification message.
@@ -5155,19 +5262,36 @@ export interface UserPoolType { EmailVerificationSubject?: string; /** - *The email configuration.
+ *The template for verification messages.
*/ - EmailConfiguration?: EmailConfigurationType; + VerificationMessageTemplate?: VerificationMessageTemplateType; /** - *A number estimating the size of the user pool.
+ *The contents of the SMS authentication message.
*/ - EstimatedNumberOfUsers?: number; + SmsAuthenticationMessage?: string; /** - *The configuration for AdminCreateUser
requests.
Can be one of the following values:
+ *
+ * OFF
- MFA tokens are not required and cannot be specified during
+ * user registration.
+ * ON
- MFA tokens are required for all user registrations. You can
+ * only specify required when you are initially creating a user pool.
+ * OPTIONAL
- Users have the option when registering to create an MFA
+ * token.
The device configuration.
@@ -5175,11 +5299,14 @@ export interface UserPoolType { DeviceConfiguration?: DeviceConfigurationType; /** - *The tags that are assigned to the user pool. A tag is a label that you can apply to - * user pools to categorize and manage them in different ways, such as by purpose, owner, - * environment, or other criteria.
+ *A number estimating the size of the user pool.
*/ - UserPoolTags?: { [key: string]: string }; + EstimatedNumberOfUsers?: number; + + /** + *The email configuration.
+ */ + EmailConfiguration?: EmailConfigurationType; /** *The SMS configuration.
@@ -5187,72 +5314,67 @@ export interface UserPoolType { SmsConfiguration?: SmsConfigurationType; /** - *The policies associated with the user pool.
+ *The tags that are assigned to the user pool. A tag is a label that you can apply to + * user pools to categorize and manage them in different ways, such as by purpose, owner, + * environment, or other criteria.
*/ - Policies?: UserPoolPolicyType; + UserPoolTags?: { [key: string]: string }; /** - *The ID of the user pool.
+ *The reason why the SMS configuration cannot send the messages to your users.
*/ - Id?: string; + SmsConfigurationFailure?: string; /** - *Specifies the attributes that are auto-verified in a user pool.
+ *The reason why the email configuration cannot send the messages to your users.
*/ - AutoVerifiedAttributes?: (VerifiedAttributeType | string)[]; + EmailConfigurationFailure?: string; /** - *The Amazon Resource Name (ARN) for the user pool.
+ *Holds the domain prefix if the user pool has a domain associated with it.
*/ - Arn?: string; + Domain?: string; /** - *Specifies whether email addresses or phone numbers can be specified as usernames when - * a user signs up.
+ *A custom domain name that you provide to Amazon Cognito. This parameter applies only
+ * if you use a custom domain to host the sign-up and sign-in pages for your application.
+ * For example: auth.example.com
.
For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.
*/ - UsernameAttributes?: (UsernameAttributeType | string)[]; + CustomDomain?: string; /** - *The template for verification messages.
+ *The configuration for AdminCreateUser
requests.
The AWS Lambda triggers associated with the user pool.
+ *The user pool add-ons.
*/ - LambdaConfig?: LambdaConfigType; + UserPoolAddOns?: UserPoolAddOnsType; /** - *Specifies the attributes that are aliased in a user pool.
+ *You can choose to enable case sensitivity on the username input for the selected
+ * sign-in option. For example, when this is set to False
, users will be able
+ * to sign in using either "username" or "Username". This configuration is immutable once
+ * it has been set. For more information, see UsernameConfigurationType.
Can be one of the following values:
- *
- * OFF
- MFA tokens are not required and cannot be specified during
- * user registration.
- * ON
- MFA tokens are required for all user registrations. You can
- * only specify required when you are initially creating a user pool.
- * OPTIONAL
- Users have the option when registering to create an MFA
- * token.
The Amazon Resource Name (ARN) for the user pool.
*/ - MfaConfiguration?: UserPoolMfaType | string; + Arn?: string; /** - *A container with the schema attributes of a user pool.
+ *Use this setting to define which verified available method a user can use to recover
+ * their password when they call ForgotPassword
. It allows you to define a
+ * preferred method when a user has more than one method available. With this setting, SMS
+ * does not qualify for a valid password recovery mechanism if the user also has SMS MFA
+ * enabled. In the absence of this setting, Cognito uses the legacy behavior to determine
+ * the recovery method where SMS is preferred over email.
Represents the request to create a user pool client.
*/ export interface CreateUserPoolClientRequest { + /** + *The user pool ID for the user pool where you want to create a user pool client.
+ */ + UserPoolId: string | undefined; + + /** + *The client name for the user pool client you would like to create.
+ */ + ClientName: string | undefined; + + /** + *Boolean to specify whether you want to generate a secret for the user pool client + * being created.
+ */ + GenerateSecret?: boolean; + + /** + *The time limit, in days, after which the refresh token is no longer valid and cannot + * be used.
+ */ + RefreshTokenValidity?: number; + /** *The time limit, between 5 minutes and 1 day, after which the access token is no longer valid and cannot be used. This value will be overridden if * you have entered a value in TokenValidityUnits.
*/ AccessTokenValidity?: number; + /** + *The time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if + * you have entered a value in TokenValidityUnits.
+ */ + IdTokenValidity?: number; + + /** + *The units in which the validity times are represented in. Default for RefreshToken is days, and default for ID and access tokens are hours.
+ */ + TokenValidityUnits?: TokenValidityUnitsType; + + /** + *The read attributes.
+ */ + ReadAttributes?: string[]; + + /** + *The user pool attributes that the app client can write to.
+ *If your app client allows users to sign in through an identity provider, this array + * must include all attributes that are mapped to identity provider attributes. Amazon + * Cognito updates mapped attributes when users sign in to your application through an + * identity provider. If your app client lacks write access to a mapped attribute, Amazon + * Cognito throws an error when it attempts to update the attribute. For more information, + * see Specifying Identity Provider Attribute Mappings for Your User + * Pool.
+ */ + WriteAttributes?: string[]; + /** *The authentication flows that are supported by the user pool clients. Flow names
* without the ALLOW_
prefix are deprecated in favor of new names with the
@@ -5397,19 +5569,14 @@ export interface CreateUserPoolClientRequest {
ExplicitAuthFlows?: (ExplicitAuthFlowsType | string)[];
/**
- *
The user pool attributes that the app client can write to.
- *If your app client allows users to sign in through an identity provider, this array - * must include all attributes that are mapped to identity provider attributes. Amazon - * Cognito updates mapped attributes when users sign in to your application through an - * identity provider. If your app client lacks write access to a mapped attribute, Amazon - * Cognito throws an error when it attempts to update the attribute. For more information, - * see Specifying Identity Provider Attribute Mappings for Your User - * Pool.
+ *A list of provider names for the identity providers that are supported on this client.
+ * The following are supported: COGNITO
, Facebook
,
+ * Google
and LoginWithAmazon
.
The default redirect URI. Must be in the CallbackURLs
list.
A list of allowed redirect (callback) URLs for the identity providers.
*A redirect URI must:
*App callback URLs such as myapp://example are also supported.
*/ - DefaultRedirectURI?: string; + CallbackURLs?: string[]; /** - *The time limit, in days, after which the refresh token is no longer valid and cannot - * be used.
+ *A list of allowed logout URLs for the identity providers.
*/ - RefreshTokenValidity?: number; + LogoutURLs?: string[]; /** - *Set to true if the client is allowed to follow the OAuth protocol when interacting - * with Cognito user pools.
+ *The default redirect URI. Must be in the CallbackURLs
list.
A redirect URI must:
+ *Be an absolute URI.
+ *Be registered with the authorization server.
+ *Not include a fragment component.
+ *See OAuth 2.0 - + * Redirection Endpoint.
+ *Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing + * purposes only.
+ *App callback URLs such as myapp://example are also supported.
*/ - AllowedOAuthFlowsUserPoolClient?: boolean; + DefaultRedirectURI?: string; /** - *The read attributes.
+ *The allowed OAuth flows.
+ *Set to code
to initiate a code grant flow, which provides an
+ * authorization code as the response. This code can be exchanged for access tokens with
+ * the token endpoint.
Set to implicit
to specify that the client should get the access token
+ * (and, optionally, ID token, based on scopes) directly.
Set to client_credentials
to specify that the client should get the
+ * access token (and, optionally, ID token, based on scopes) from the token endpoint using
+ * a combination of client and client_secret.
Boolean to specify whether you want to generate a secret for the user pool client - * being created.
+ *The allowed OAuth scopes. Possible values provided by OAuth are: phone
,
+ * email
, openid
, and profile
. Possible values
+ * provided by AWS are: aws.cognito.signin.user.admin
. Custom scopes created
+ * in Resource Servers are also supported.
The Amazon Pinpoint analytics configuration for collecting metrics for this user - * pool.
- *Set to true if the client is allowed to follow the OAuth protocol when interacting + * with Cognito user pools.
+ */ + AllowedOAuthFlowsUserPoolClient?: boolean; + + /** + *The Amazon Pinpoint analytics configuration for collecting metrics for this user + * pool.
+ *In regions where Pinpoint is not available, Cognito User Pools only supports sending events to Amazon Pinpoint projects in us-east-1. * In regions where Pinpoint is available, Cognito User Pools will * support sending events to Amazon Pinpoint projects within that same region. @@ -5495,94 +5693,48 @@ export interface CreateUserPoolClientRequest { *
The time limit, between 5 minutes and 1 day, after which the ID token is no longer valid and cannot be used. This value will be overridden if - * you have entered a value in TokenValidityUnits.
- */ - IdTokenValidity?: number; - - /** - *A list of allowed logout URLs for the identity providers.
- */ - LogoutURLs?: string[]; - - /** - *The units in which the validity times are represented in. Default for RefreshToken is days, and default for ID and access tokens are hours.
- */ - TokenValidityUnits?: TokenValidityUnitsType; +export namespace CreateUserPoolClientRequest { + export const filterSensitiveLog = (obj: CreateUserPoolClientRequest): any => ({ + ...obj, + }); +} +/** + *Contains information about a user pool client.
+ */ +export interface UserPoolClientType { /** - *A list of provider names for the identity providers that are supported on this client.
- * The following are supported: COGNITO
, Facebook
,
- * Google
and LoginWithAmazon
.
The user pool ID for the user pool client.
*/ - SupportedIdentityProviders?: string[]; + UserPoolId?: string; /** - *The user pool ID for the user pool where you want to create a user pool client.
+ *The client name from the user pool request of the client type.
*/ - UserPoolId: string | undefined; + ClientName?: string; /** - *The allowed OAuth flows.
- *Set to code
to initiate a code grant flow, which provides an
- * authorization code as the response. This code can be exchanged for access tokens with
- * the token endpoint.
Set to implicit
to specify that the client should get the access token
- * (and, optionally, ID token, based on scopes) directly.
Set to client_credentials
to specify that the client should get the
- * access token (and, optionally, ID token, based on scopes) from the token endpoint using
- * a combination of client and client_secret.
The ID of the client associated with the user pool.
*/ - AllowedOAuthFlows?: (OAuthFlowType | string)[]; + ClientId?: string; /** - *The client name for the user pool client you would like to create.
+ *The client secret from the user pool request of the client type.
*/ - ClientName: string | undefined; + ClientSecret?: string; /** - *The allowed OAuth scopes. Possible values provided by OAuth are: phone
,
- * email
, openid
, and profile
. Possible values
- * provided by AWS are: aws.cognito.signin.user.admin
. Custom scopes created
- * in Resource Servers are also supported.
The date the user pool client was last modified.
*/ - AllowedOAuthScopes?: string[]; + LastModifiedDate?: Date; /** - *A list of allowed redirect (callback) URLs for the identity providers.
- *A redirect URI must:
- *Be an absolute URI.
- *Be registered with the authorization server.
- *Not include a fragment component.
- *See OAuth 2.0 - - * Redirection Endpoint.
- *Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing - * purposes only.
- *App callback URLs such as myapp://example are also supported.
+ *The date the user pool client was created.
*/ - CallbackURLs?: string[]; -} - -export namespace CreateUserPoolClientRequest { - export const filterSensitiveLog = (obj: CreateUserPoolClientRequest): any => ({ - ...obj, - }); -} + CreationDate?: Date; -/** - *Contains information about a user pool client.
- */ -export interface UserPoolClientType { /** *The time limit, in days, after which the refresh token is no longer valid and cannot * be used.
@@ -5590,63 +5742,68 @@ export interface UserPoolClientType { RefreshTokenValidity?: number; /** - *The allowed OAuth scopes. Possible values provided by OAuth are: phone
,
- * email
, openid
, and profile
. Possible values
- * provided by AWS are: aws.cognito.signin.user.admin
. Custom scopes created
- * in Resource Servers are also supported.
The date the user pool client was created.
+ *The time limit, specified by tokenValidityUnits, defaulting to hours, after which the access token is no longer valid and cannot be used.
*/ - CreationDate?: Date; + AccessTokenValidity?: number; /** - *The Amazon Pinpoint analytics configuration for the user pool client.
- *Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.
- *The time limit, specified by tokenValidityUnits, defaulting to hours, after which the refresh token is no longer valid and cannot be used.
*/ - AnalyticsConfiguration?: AnalyticsConfigurationType; + IdTokenValidity?: number; /** - *The date the user pool client was last modified.
+ *The time units used to specify the token validity times of their respective token.
*/ - LastModifiedDate?: Date; + TokenValidityUnits?: TokenValidityUnitsType; /** - *Set to true if the client is allowed to follow the OAuth protocol when interacting - * with Cognito user pools.
+ *The Read-only attributes.
*/ - AllowedOAuthFlowsUserPoolClient?: boolean; + ReadAttributes?: string[]; /** - *The Read-only attributes.
+ *The writeable attributes.
*/ - ReadAttributes?: string[]; + WriteAttributes?: string[]; /** - *The default redirect URI. Must be in the CallbackURLs
list.
A redirect URI must:
+ *The authentication flows that are supported by the user pool clients. Flow names
+ * without the ALLOW_
prefix are deprecated in favor of new names with the
+ * ALLOW_
prefix. Note that values with ALLOW_
prefix cannot
+ * be used along with values without ALLOW_
prefix.
Valid values include:
*Be an absolute URI.
+ *
+ * ALLOW_ADMIN_USER_PASSWORD_AUTH
: Enable admin based user password
+ * authentication flow ADMIN_USER_PASSWORD_AUTH
. This setting replaces
+ * the ADMIN_NO_SRP_AUTH
setting. With this authentication flow,
+ * Cognito receives the password in the request instead of using the SRP (Secure
+ * Remote Password protocol) protocol to verify passwords.
Be registered with the authorization server.
+ *
+ * ALLOW_CUSTOM_AUTH
: Enable Lambda trigger based
+ * authentication.
Not include a fragment component.
+ *
+ * ALLOW_USER_PASSWORD_AUTH
: Enable user password-based
+ * authentication. In this flow, Cognito receives the password in the request
+ * instead of using the SRP protocol to verify passwords.
+ * ALLOW_USER_SRP_AUTH
: Enable SRP based authentication.
+ * ALLOW_REFRESH_TOKEN_AUTH
: Enable authflow to refresh
+ * tokens.
See OAuth 2.0 - - * Redirection Endpoint.
- *Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing - * purposes only.
- *App callback URLs such as myapp://example are also supported.
*/ - DefaultRedirectURI?: string; + ExplicitAuthFlows?: (ExplicitAuthFlowsType | string)[]; /** *A list of provider names for the identity providers that are supported on this @@ -5654,11 +5811,6 @@ export interface UserPoolClientType { */ SupportedIdentityProviders?: string[]; - /** - *
The user pool ID for the user pool client.
- */ - UserPoolId?: string; - /** *A list of allowed redirect (callback) URLs for the identity providers.
*A redirect URI must:
@@ -5682,134 +5834,104 @@ export interface UserPoolClientType { CallbackURLs?: string[]; /** - *The time units used to specify the token validity times of their respective token.
- */ - TokenValidityUnits?: TokenValidityUnitsType; - - /** - *Use this setting to choose which errors and responses are returned by Cognito APIs
- * during authentication, account confirmation, and password recovery when the user does
- * not exist in the user pool. When set to ENABLED
and the user does not
- * exist, authentication returns an error indicating either the username or password was
- * incorrect, and account confirmation and password recovery return a response indicating a
- * code was sent to a simulated destination. When set to LEGACY
, those APIs
- * will return a UserNotFoundException
exception if the user does not exist in
- * the user pool.
Valid values include:
- *
- * ENABLED
- This prevents user existence-related errors.
- * LEGACY
- This represents the old behavior of Cognito where user
- * existence related errors are not prevented.
After February 15th 2020, the value of PreventUserExistenceErrors
- * will default to ENABLED
for newly created user pool clients if no value
- * is provided.
The allowed OAuth flows.
- *Set to code
to initiate a code grant flow, which provides an
- * authorization code as the response. This code can be exchanged for access tokens with
- * the token endpoint.
Set to implicit
to specify that the client should get the access token
- * (and, optionally, ID token, based on scopes) directly.
Set to client_credentials
to specify that the client should get the
- * access token (and, optionally, ID token, based on scopes) from the token endpoint using
- * a combination of client and client_secret.
The client name from the user pool request of the client type.
- */ - ClientName?: string; - - /** - *The writeable attributes.
- */ - WriteAttributes?: string[]; - - /** - *The client secret from the user pool request of the client type.
+ *A list of allowed logout URLs for the identity providers.
*/ - ClientSecret?: string; + LogoutURLs?: string[]; /** - *The authentication flows that are supported by the user pool clients. Flow names
- * without the ALLOW_
prefix are deprecated in favor of new names with the
- * ALLOW_
prefix. Note that values with ALLOW_
prefix cannot
- * be used along with values without ALLOW_
prefix.
Valid values include:
+ *The default redirect URI. Must be in the CallbackURLs
list.
A redirect URI must:
*
- * ALLOW_ADMIN_USER_PASSWORD_AUTH
: Enable admin based user password
- * authentication flow ADMIN_USER_PASSWORD_AUTH
. This setting replaces
- * the ADMIN_NO_SRP_AUTH
setting. With this authentication flow,
- * Cognito receives the password in the request instead of using the SRP (Secure
- * Remote Password protocol) protocol to verify passwords.
- * ALLOW_CUSTOM_AUTH
: Enable Lambda trigger based
- * authentication.
- * ALLOW_USER_PASSWORD_AUTH
: Enable user password-based
- * authentication. In this flow, Cognito receives the password in the request
- * instead of using the SRP protocol to verify passwords.
Be an absolute URI.
*
- * ALLOW_USER_SRP_AUTH
: Enable SRP based authentication.
Be registered with the authorization server.
*
- * ALLOW_REFRESH_TOKEN_AUTH
: Enable authflow to refresh
- * tokens.
Not include a fragment component.
*See OAuth 2.0 - + * Redirection Endpoint.
+ *Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing + * purposes only.
+ *App callback URLs such as myapp://example are also supported.
*/ - ExplicitAuthFlows?: (ExplicitAuthFlowsType | string)[]; + DefaultRedirectURI?: string; /** - *A list of allowed logout URLs for the identity providers.
+ *The allowed OAuth flows.
+ *Set to code
to initiate a code grant flow, which provides an
+ * authorization code as the response. This code can be exchanged for access tokens with
+ * the token endpoint.
Set to implicit
to specify that the client should get the access token
+ * (and, optionally, ID token, based on scopes) directly.
Set to client_credentials
to specify that the client should get the
+ * access token (and, optionally, ID token, based on scopes) from the token endpoint using
+ * a combination of client and client_secret.
The ID of the client associated with the user pool.
+ *The allowed OAuth scopes. Possible values provided by OAuth are: phone
,
+ * email
, openid
, and profile
. Possible values
+ * provided by AWS are: aws.cognito.signin.user.admin
. Custom scopes created
+ * in Resource Servers are also supported.
The time limit, specified by tokenValidityUnits, defaulting to hours, after which the access token is no longer valid and cannot be used.
+ *Set to true if the client is allowed to follow the OAuth protocol when interacting + * with Cognito user pools.
*/ - AccessTokenValidity?: number; + AllowedOAuthFlowsUserPoolClient?: boolean; /** - *The time limit, specified by tokenValidityUnits, defaulting to hours, after which the refresh token is no longer valid and cannot be used.
+ *The Amazon Pinpoint analytics configuration for the user pool client.
+ *Cognito User Pools only supports sending events to Amazon Pinpoint projects in the US East (N. Virginia) us-east-1 Region, regardless of the region in which the user pool resides.
+ *Use this setting to choose which errors and responses are returned by Cognito APIs
+ * during authentication, account confirmation, and password recovery when the user does
+ * not exist in the user pool. When set to ENABLED
and the user does not
+ * exist, authentication returns an error indicating either the username or password was
+ * incorrect, and account confirmation and password recovery return a response indicating a
+ * code was sent to a simulated destination. When set to LEGACY
, those APIs
+ * will return a UserNotFoundException
exception if the user does not exist in
+ * the user pool.
Valid values include:
+ *
+ * ENABLED
- This prevents user existence-related errors.
+ * LEGACY
- This represents the old behavior of Cognito where user
+ * existence related errors are not prevented.
After February 15th 2020, the value of PreventUserExistenceErrors
+ * will default to ENABLED
for newly created user pool clients if no value
+ * is provided.
The configuration for a custom domain that hosts the sign-up and sign-in webpages for - * your application.
- *Provide this parameter only if you want to use a custom domain for your user pool. - * Otherwise, you can exclude this parameter and use the Amazon Cognito hosted domain - * instead.
- *For more information about the hosted domain and custom domains, see Configuring a User Pool Domain.
+ *The domain string.
*/ - CustomDomainConfig?: CustomDomainConfigType; + Domain: string | undefined; /** *The user pool ID.
@@ -5895,9 +6012,14 @@ export interface CreateUserPoolDomainRequest { UserPoolId: string | undefined; /** - *The domain string.
+ *The configuration for a custom domain that hosts the sign-up and sign-in webpages for + * your application.
+ *Provide this parameter only if you want to use a custom domain for your user pool. + * Otherwise, you can exclude this parameter and use the Amazon Cognito hosted domain + * instead.
+ *For more information about the hosted domain and custom domains, see Configuring a User Pool Domain.
*/ - Domain: string | undefined; + CustomDomainConfig?: CustomDomainConfigType; } export namespace CreateUserPoolDomainRequest { @@ -5922,14 +6044,14 @@ export namespace CreateUserPoolDomainResponse { export interface DeleteGroupRequest { /** - *The user pool ID for the user pool.
+ *The name of the group.
*/ - UserPoolId: string | undefined; + GroupName: string | undefined; /** - *The name of the group.
+ *The user pool ID for the user pool.
*/ - GroupName: string | undefined; + UserPoolId: string | undefined; } export namespace DeleteGroupRequest { @@ -5940,14 +6062,14 @@ export namespace DeleteGroupRequest { export interface DeleteIdentityProviderRequest { /** - *The identity provider name.
+ *The user pool ID.
*/ - ProviderName: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID.
+ *The identity provider name.
*/ - UserPoolId: string | undefined; + ProviderName: string | undefined; } export namespace DeleteIdentityProviderRequest { @@ -6062,14 +6184,14 @@ export namespace DeleteUserPoolRequest { */ export interface DeleteUserPoolClientRequest { /** - *The app client ID of the app associated with the user pool.
+ *The user pool ID for the user pool where you want to delete the client.
*/ - ClientId: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID for the user pool where you want to delete the client.
+ *The app client ID of the app associated with the user pool.
*/ - UserPoolId: string | undefined; + ClientId: string | undefined; } export namespace DeleteUserPoolClientRequest { @@ -6107,14 +6229,14 @@ export namespace DeleteUserPoolDomainResponse { export interface DescribeIdentityProviderRequest { /** - *The identity provider name.
+ *The user pool ID.
*/ - ProviderName: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID.
+ *The identity provider name.
*/ - UserPoolId: string | undefined; + ProviderName: string | undefined; } export namespace DescribeIdentityProviderRequest { @@ -6239,18 +6361,18 @@ export namespace CompromisedCredentialsRiskConfigurationType { *The type of the configuration to override the risk decision.
*/ export interface RiskExceptionConfigurationType { - /** - *Risk detection is not performed on the IP addresses in the range list. The IP range is - * in CIDR notation.
- */ - SkippedIPRangeList?: string[]; - /** *Overrides the risk decision to always block the pre-authentication requests. The IP * range is in CIDR notation: a compact representation of an IP address and its associated * routing prefix.
*/ BlockedIPRangeList?: string[]; + + /** + *Risk detection is not performed on the IP addresses in the range list. The IP range is + * in CIDR notation.
+ */ + SkippedIPRangeList?: string[]; } export namespace RiskExceptionConfigurationType { @@ -6263,15 +6385,22 @@ export namespace RiskExceptionConfigurationType { *The risk configuration type.
*/ export interface RiskConfigurationType { + /** + *The user pool ID.
+ */ + UserPoolId?: string; + /** *The app client ID.
*/ ClientId?: string; /** - *The user pool ID.
+ *The compromised credentials risk configuration object including the
+ * EventFilter
and the EventAction
+ *
The account takeover risk configuration object including the @@ -6285,13 +6414,6 @@ export interface RiskConfigurationType { */ RiskExceptionConfiguration?: RiskExceptionConfigurationType; - /** - *
The compromised credentials risk configuration object including the
- * EventFilter
and the EventAction
- *
The last modified date.
*/ @@ -6326,14 +6448,14 @@ export namespace DescribeRiskConfigurationResponse { */ export interface DescribeUserImportJobRequest { /** - *The job ID for the user import job.
+ *The user pool ID for the user pool that the users are being imported into.
*/ - JobId: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID for the user pool that the users are being imported into.
+ *The job ID for the user import job.
*/ - UserPoolId: string | undefined; + JobId: string | undefined; } export namespace DescribeUserImportJobRequest { @@ -6396,14 +6518,14 @@ export namespace DescribeUserPoolResponse { */ export interface DescribeUserPoolClientRequest { /** - *The app client ID of the app associated with the user pool.
+ *The user pool ID for the user pool you want to describe.
*/ - ClientId: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID for the user pool you want to describe.
+ *The app client ID of the app associated with the user pool.
*/ - UserPoolId: string | undefined; + ClientId: string | undefined; } export namespace DescribeUserPoolClientRequest { @@ -6457,45 +6579,45 @@ export enum DomainStatusType { */ export interface DomainDescriptionType { /** - *The configuration for a custom domain that hosts the sign-up and sign-in webpages for - * your application.
+ *The user pool ID.
*/ - CustomDomainConfig?: CustomDomainConfigType; + UserPoolId?: string; /** - *The domain string.
+ *The AWS account ID for the user pool owner.
*/ - Domain?: string; + AWSAccountId?: string; /** - *The ARN of the CloudFront distribution.
+ *The domain string.
*/ - CloudFrontDistribution?: string; + Domain?: string; /** - *The domain status.
+ *The S3 bucket where the static files for this domain are stored.
*/ - Status?: DomainStatusType | string; + S3Bucket?: string; /** - *The user pool ID.
+ *The ARN of the CloudFront distribution.
*/ - UserPoolId?: string; + CloudFrontDistribution?: string; /** - *The AWS account ID for the user pool owner.
+ *The app version.
*/ - AWSAccountId?: string; + Version?: string; /** - *The app version.
+ *The domain status.
*/ - Version?: string; + Status?: DomainStatusType | string; /** - *The S3 bucket where the static files for this domain are stored.
+ *The configuration for a custom domain that hosts the sign-up and sign-in webpages for + * your application.
*/ - S3Bucket?: string; + CustomDomainConfig?: CustomDomainConfigType; } export namespace DomainDescriptionType { @@ -6543,6 +6665,30 @@ export namespace ForgetDeviceRequest { *Represents the request to reset a user's password.
*/ export interface ForgotPasswordRequest { + /** + *The ID of the client associated with the user pool.
+ */ + ClientId: string | undefined; + + /** + *A keyed-hash message authentication code (HMAC) calculated using the secret key of a + * user pool client and username plus the client ID in the message.
+ */ + SecretHash?: string; + + /** + *Contextual data such as the user's device fingerprint, IP address, or location used + * for evaluating the risk of an unexpected event by Amazon Cognito advanced + * security.
+ */ + UserContextData?: UserContextDataType; + + /** + *The user name of the user for whom you want to enter a code to reset a forgotten + * password.
+ */ + Username: string | undefined; + /** *The Amazon Pinpoint analytics metadata for collecting metrics for
* ForgotPassword
calls.
Amazon Cognito does not validate the ClientMetadata value.
- *Amazon Cognito does not encrypt the the ClientMetadata value, so don't use - * it to provide sensitive information.
- *The user name of the user for whom you want to enter a code to reset a forgotten - * password.
- */ - Username: string | undefined; - - /** - *Contextual data such as the user's device fingerprint, IP address, or location used - * for evaluating the risk of an unexpected event by Amazon Cognito advanced - * security.
- */ - UserContextData?: UserContextDataType; - - /** - *A keyed-hash message authentication code (HMAC) calculated using the secret key of a - * user pool client and username plus the client ID in the message.
- */ - SecretHash?: string; - - /** - *The ID of the client associated with the user pool.
+ *Amazon Cognito does not validate the ClientMetadata value.
+ * + *Amazon Cognito does not encrypt the the ClientMetadata value, so don't use + * it to provide sensitive information.
+ *The access token.
+ *The device key.
*/ - AccessToken?: string; + DeviceKey: string | undefined; /** - *The device key.
+ *The access token.
*/ - DeviceKey: string | undefined; + AccessToken?: string; } export namespace GetDeviceRequest { @@ -6775,14 +6897,14 @@ export namespace GetGroupResponse { export interface GetIdentityProviderByIdentifierRequest { /** - *The identity provider ID.
+ *The user pool ID.
*/ - IdpIdentifier: string | undefined; + UserPoolId: string | undefined; /** - *The user pool ID.
+ *The identity provider ID.
*/ - UserPoolId: string | undefined; + IdpIdentifier: string | undefined; } export namespace GetIdentityProviderByIdentifierRequest { @@ -6838,14 +6960,14 @@ export namespace GetSigningCertificateResponse { export interface GetUICustomizationRequest { /** - *The client ID for the client app.
+ *The user pool ID for the user pool.
*/ - ClientId?: string; + UserPoolId: string | undefined; /** - *The user pool ID for the user pool.
+ *The client ID for the client app.
*/ - UserPoolId: string | undefined; + ClientId?: string; } export namespace GetUICustomizationRequest { @@ -6866,34 +6988,34 @@ export interface UICustomizationType { UserPoolId?: string; /** - *The CSS values in the UI customization.
+ *The client ID for the client app.
*/ - CSS?: string; + ClientId?: string; /** - *The last-modified date for the UI customization.
+ *The logo image for the UI customization.
*/ - LastModifiedDate?: Date; + ImageUrl?: string; /** - *The creation date for the UI customization.
+ *The CSS values in the UI customization.
*/ - CreationDate?: Date; + CSS?: string; /** - *The client ID for the client app.
+ *The CSS version number.
*/ - ClientId?: string; + CSSVersion?: string; /** - *The logo image for the UI customization.
+ *The last-modified date for the UI customization.
*/ - ImageUrl?: string; + LastModifiedDate?: Date; /** - *The CSS version number.
+ *The creation date for the UI customization.
*/ - CSSVersion?: string; + CreationDate?: Date; } export namespace UICustomizationType { @@ -6940,12 +7062,6 @@ export namespace GetUserRequest { * user. */ export interface GetUserResponse { - /** - *The MFA options that are enabled for the user. The possible values in this list are
- * SMS_MFA
and SOFTWARE_TOKEN_MFA
.
The user name of the user you wish to retrieve from the get user request.
*/ @@ -6971,6 +7087,12 @@ export interface GetUserResponse { *The user's preferred MFA setting.
*/ PreferredMfaSetting?: string; + + /** + *The MFA options that are enabled for the user. The possible values in this list are
+ * SMS_MFA
and SOFTWARE_TOKEN_MFA
.
Represents the request to get user attribute verification.
*/ export interface GetUserAttributeVerificationCodeRequest { + /** + *The access token returned by the server response to get the user attribute + * verification code.
+ */ + AccessToken: string | undefined; + /** *The attribute name returned by the server response to get the user attribute * verification code.
@@ -7028,12 +7156,6 @@ export interface GetUserAttributeVerificationCodeRequest { * */ ClientMetadata?: { [key: string]: string }; - - /** - *The access token returned by the server response to get the user attribute - * verification code.
- */ - AccessToken: string | undefined; } export namespace GetUserAttributeVerificationCodeRequest { @@ -7078,17 +7200,17 @@ export namespace GetUserPoolMfaConfigRequest { *The SMS text message multi-factor authentication (MFA) configuration type.
*/ export interface SmsMfaConfigType { - /** - *The SMS configuration.
- */ - SmsConfiguration?: SmsConfigurationType; - /** *The SMS authentication message that will be sent to users with the code they need to * sign in. The message must contain the ‘{####}’ placeholder, which will be replaced with * the code. If the message is not included, and default message will be used.
*/ SmsAuthenticationMessage?: string; + + /** + *The SMS configuration.
+ */ + SmsConfiguration?: SmsConfigurationType; } export namespace SmsMfaConfigType { @@ -7115,14 +7237,14 @@ export namespace SoftwareTokenMfaConfigType { export interface GetUserPoolMfaConfigResponse { /** - *The software token multi-factor (MFA) configuration.
+ *The SMS text message multi-factor (MFA) configuration.
*/ - SoftwareTokenMfaConfiguration?: SoftwareTokenMfaConfigType; + SmsMfaConfiguration?: SmsMfaConfigType; /** - *The SMS text message multi-factor (MFA) configuration.
+ *The software token multi-factor (MFA) configuration.
*/ - SmsMfaConfiguration?: SmsMfaConfigType; + SoftwareTokenMfaConfiguration?: SoftwareTokenMfaConfigType; /** *The multi-factor (MFA) configuration. Valid values include:
@@ -7183,43 +7305,6 @@ export namespace GlobalSignOutResponse { *Initiates the authentication request.
*/ export interface InitiateAuthRequest { - /** - *The Amazon Pinpoint analytics metadata for collecting metrics for
- * InitiateAuth
calls.
The authentication parameters. These are inputs corresponding to the
- * AuthFlow
that you are invoking. The required values depend on the value
- * of AuthFlow
:
For USER_SRP_AUTH
: USERNAME
(required),
- * SRP_A
(required), SECRET_HASH
(required if the app
- * client is configured with a client secret), DEVICE_KEY
.
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN
: REFRESH_TOKEN
- * (required), SECRET_HASH
(required if the app client is configured
- * with a client secret), DEVICE_KEY
.
For CUSTOM_AUTH
: USERNAME
(required),
- * SECRET_HASH
(if app client is configured with client secret),
- * DEVICE_KEY
. To start the authentication flow with password verification, include ChallengeName: SRP_A
and SRP_A: (The SRP_A Value)
.
Contextual data such as the user's device fingerprint, IP address, or location used - * for evaluating the risk of an unexpected event by Amazon Cognito advanced - * security.
- */ - UserContextData?: UserContextDataType; - /** *The authentication flow for this call to execute. The API action will depend on this * value. For example:
@@ -7279,9 +7364,28 @@ export interface InitiateAuthRequest { AuthFlow: AuthFlowType | string | undefined; /** - *The app client ID.
+ *The authentication parameters. These are inputs corresponding to the
+ * AuthFlow
that you are invoking. The required values depend on the value
+ * of AuthFlow
:
For USER_SRP_AUTH
: USERNAME
(required),
+ * SRP_A
(required), SECRET_HASH
(required if the app
+ * client is configured with a client secret), DEVICE_KEY
.
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN
: REFRESH_TOKEN
+ * (required), SECRET_HASH
(required if the app client is configured
+ * with a client secret), DEVICE_KEY
.
For CUSTOM_AUTH
: USERNAME
(required),
+ * SECRET_HASH
(if app client is configured with client secret),
+ * DEVICE_KEY
. To start the authentication flow with password verification, include ChallengeName: SRP_A
and SRP_A: (The SRP_A Value)
.
A map of custom key-value pairs that you can provide as input for certain custom @@ -7357,6 +7461,24 @@ export interface InitiateAuthRequest { * */ ClientMetadata?: { [key: string]: string }; + + /** + *
The app client ID.
+ */ + ClientId: string | undefined; + + /** + *The Amazon Pinpoint analytics metadata for collecting metrics for
+ * InitiateAuth
calls.
Contextual data such as the user's device fingerprint, IP address, or location used + * for evaluating the risk of an unexpected event by Amazon Cognito advanced + * security.
+ */ + UserContextData?: UserContextDataType; } export namespace InitiateAuthRequest { @@ -7418,14 +7540,6 @@ export interface InitiateAuthResponse { */ ChallengeName?: ChallengeNameType | string; - /** - *The result of the authentication response. This is only returned if the caller does
- * not need to pass another challenge. If the caller does need to pass another challenge
- * before it gets tokens, ChallengeName
, ChallengeParameters
, and
- * Session
are returned.
The session which should be passed both ways in challenge-response calls to the * service. If the caller needs to @@ -7443,6 +7557,14 @@ export interface InitiateAuthResponse { * applicable).
*/ ChallengeParameters?: { [key: string]: string }; + + /** + *The result of the authentication response. This is only returned if the caller does
+ * not need to pass another challenge. If the caller does need to pass another challenge
+ * before it gets tokens, ChallengeName
, ChallengeParameters
, and
+ * Session
are returned.
Represents the request to list the devices.
*/ export interface ListDevicesRequest { + /** + *The access tokens for the request to list devices.
+ */ + AccessToken: string | undefined; + /** *The limit of the device request.
*/ @@ -7467,11 +7594,6 @@ export interface ListDevicesRequest { *The pagination token for the list request.
*/ PaginationToken?: string; - - /** - *The access tokens for the request to list devices.
- */ - AccessToken: string | undefined; } export namespace ListDevicesRequest { @@ -7509,15 +7631,15 @@ export interface ListGroupsRequest { UserPoolId: string | undefined; /** - *An identifier that was returned from the previous call to this operation, which can be - * used to return the next set of items in the list.
+ *The limit of the request to list groups.
*/ - NextToken?: string; + Limit?: number; /** - *The limit of the request to list groups.
+ *An identifier that was returned from the previous call to this operation, which can be + * used to return the next set of items in the list.
*/ - Limit?: number; + NextToken?: string; } export namespace ListGroupsRequest { @@ -7572,25 +7694,25 @@ export namespace ListIdentityProvidersRequest { *A container for identity provider details.
*/ export interface ProviderDescription { - /** - *The identity provider type.
- */ - ProviderType?: IdentityProviderTypeType | string; - /** *The identity provider name.
*/ ProviderName?: string; /** - *The date the provider was added to the user pool.
+ *The identity provider type.
*/ - CreationDate?: Date; + ProviderType?: IdentityProviderTypeType | string; /** *The date the provider was last modified.
*/ LastModifiedDate?: Date; + + /** + *The date the provider was added to the user pool.
+ */ + CreationDate?: Date; } export namespace ProviderDescription { @@ -7601,14 +7723,14 @@ export namespace ProviderDescription { export interface ListIdentityProvidersResponse { /** - *A pagination token.
+ *A list of identity provider objects.
*/ - NextToken?: string; + Providers: ProviderDescription[] | undefined; /** - *A list of identity provider objects.
+ *A pagination token.
*/ - Providers: ProviderDescription[] | undefined; + NextToken?: string; } export namespace ListIdentityProvidersResponse { @@ -7642,14 +7764,14 @@ export namespace ListResourceServersRequest { export interface ListResourceServersResponse { /** - *A pagination token.
+ *The resource servers.
*/ - NextToken?: string; + ResourceServers: ResourceServerType[] | undefined; /** - *The resource servers.
+ *A pagination token.
*/ - ResourceServers: ResourceServerType[] | undefined; + NextToken?: string; } export namespace ListResourceServersResponse { @@ -7694,16 +7816,16 @@ export interface ListUserImportJobsRequest { UserPoolId: string | undefined; /** - *An identifier that was returned from the previous call to
- * ListUserImportJobs
, which can be used to return the next set of import
- * jobs in the list.
The maximum number of import jobs you want the request to return.
*/ - PaginationToken?: string; + MaxResults: number | undefined; /** - *The maximum number of import jobs you want the request to return.
+ *An identifier that was returned from the previous call to
+ * ListUserImportJobs
, which can be used to return the next set of import
+ * jobs in the list.
An identifier that can be used to return the next set of user import jobs in the - * list.
+ *The user import jobs.
*/ - PaginationToken?: string; + UserImportJobs?: UserImportJobType[]; /** - *The user import jobs.
+ *An identifier that can be used to return the next set of user import jobs in the + * list.
*/ - UserImportJobs?: UserImportJobType[]; + PaginationToken?: string; } export namespace ListUserImportJobsResponse { @@ -7739,6 +7861,11 @@ export namespace ListUserImportJobsResponse { *Represents the request to list the user pool clients.
*/ export interface ListUserPoolClientsRequest { + /** + *The user pool ID for the user pool where you want to list user pool clients.
+ */ + UserPoolId: string | undefined; + /** *The maximum number of results you want the request to return when listing the user * pool clients.
@@ -7750,11 +7877,6 @@ export interface ListUserPoolClientsRequest { * used to return the next set of items in the list. */ NextToken?: string; - - /** - *The user pool ID for the user pool where you want to list user pool clients.
- */ - UserPoolId: string | undefined; } export namespace ListUserPoolClientsRequest { @@ -7844,34 +7966,34 @@ export namespace ListUserPoolsRequest { */ export interface UserPoolDescriptionType { /** - *The date the user pool description was last modified.
+ *The ID in a user pool description.
*/ - LastModifiedDate?: Date; + Id?: string; /** - *The date the user pool description was created.
+ *The name in a user pool description.
*/ - CreationDate?: Date; + Name?: string; /** - *The user pool status in a user pool description.
+ *The AWS Lambda configuration information in a user pool description.
*/ - Status?: StatusType | string; + LambdaConfig?: LambdaConfigType; /** - *The AWS Lambda configuration information in a user pool description.
+ *The user pool status in a user pool description.
*/ - LambdaConfig?: LambdaConfigType; + Status?: StatusType | string; /** - *The ID in a user pool description.
+ *The date the user pool description was last modified.
*/ - Id?: string; + LastModifiedDate?: Date; /** - *The name in a user pool description.
+ *The date the user pool description was created.
*/ - Name?: string; + CreationDate?: Date; } export namespace UserPoolDescriptionType { @@ -7885,15 +8007,15 @@ export namespace UserPoolDescriptionType { */ export interface ListUserPoolsResponse { /** - *An identifier that was returned from the previous call to this operation, which can be - * used to return the next set of items in the list.
+ *The user pools from the response to list users.
*/ - NextToken?: string; + UserPools?: UserPoolDescriptionType[]; /** - *The user pools from the response to list users.
+ *An identifier that was returned from the previous call to this operation, which can be + * used to return the next set of items in the list.
*/ - UserPools?: UserPoolDescriptionType[]; + NextToken?: string; } export namespace ListUserPoolsResponse { @@ -7911,12 +8033,6 @@ export interface ListUsersRequest { */ UserPoolId: string | undefined; - /** - *An identifier that was returned from the previous call to this operation, which can be - * used to return the next set of items in the list.
- */ - PaginationToken?: string; - /** *An array of strings, where each string is the name of a user attribute to be returned * for each user in the search results. If the array is null, all attributes are @@ -7924,6 +8040,17 @@ export interface ListUsersRequest { */ AttributesToGet?: string[]; + /** + *
Maximum number of users to be returned.
+ */ + Limit?: number; + + /** + *An identifier that was returned from the previous call to this operation, which can be + * used to return the next set of items in the list.
+ */ + PaginationToken?: string; + /** *A filter string of the form "AttributeName * Filter-Type "AttributeValue"". Quotation marks @@ -8006,11 +8133,6 @@ export interface ListUsersRequest { * Developer Guide.
*/ Filter?: string; - - /** - *Maximum number of users to be returned.
- */ - Limit?: number; } export namespace ListUsersRequest { @@ -8024,15 +8146,15 @@ export namespace ListUsersRequest { */ export interface ListUsersResponse { /** - *An identifier that was returned from the previous call to this operation, which can be - * used to return the next set of items in the list.
+ *The users returned in the request to list users.
*/ - PaginationToken?: string; + Users?: UserType[]; /** - *The users returned in the request to list users.
+ *An identifier that was returned from the previous call to this operation, which can be + * used to return the next set of items in the list.
*/ - Users?: UserType[]; + PaginationToken?: string; } export namespace ListUsersResponse { @@ -8043,11 +8165,6 @@ export namespace ListUsersResponse { } export interface ListUsersInGroupRequest { - /** - *The limit of the request to list users.
- */ - Limit?: number; - /** *The user pool ID for the user pool.
*/ @@ -8058,6 +8175,11 @@ export interface ListUsersInGroupRequest { */ GroupName: string | undefined; + /** + *The limit of the request to list users.
+ */ + Limit?: number; + /** *An identifier that was returned from the previous call to this operation, which can be * used to return the next set of items in the list.
@@ -8096,9 +8218,15 @@ export namespace ListUsersInGroupResponse { */ export interface ResendConfirmationCodeRequest { /** - *The user name of the user to whom you wish to resend a confirmation code.
+ *The ID of the client associated with the user pool.
*/ - Username: string | undefined; + ClientId: string | undefined; + + /** + *A keyed-hash message authentication code (HMAC) calculated using the secret key of a + * user pool client and username plus the client ID in the message.
+ */ + SecretHash?: string; /** *Contextual data such as the user's device fingerprint, IP address, or location used @@ -8108,15 +8236,15 @@ export interface ResendConfirmationCodeRequest { UserContextData?: UserContextDataType; /** - *
A keyed-hash message authentication code (HMAC) calculated using the secret key of a - * user pool client and username plus the client ID in the message.
+ *The user name of the user to whom you wish to resend a confirmation code.
*/ - SecretHash?: string; + Username: string | undefined; /** - *The ID of the client associated with the user pool.
+ *The Amazon Pinpoint analytics metadata for collecting metrics for
+ * ResendConfirmationCode
calls.
A map of custom key-value pairs that you can provide as input for any custom workflows @@ -8153,20 +8281,14 @@ export interface ResendConfirmationCodeRequest { * */ ClientMetadata?: { [key: string]: string }; - - /** - *
The Amazon Pinpoint analytics metadata for collecting metrics for
- * ResendConfirmationCode
calls.
The session which should be passed both ways in challenge-response calls to the
- * service. If InitiateAuth
or RespondToAuthChallenge
API call
- * determines that the caller needs to go through another challenge, they return a session
- * with other challenge parameters. This session should be passed as it is to the next
- * RespondToAuthChallenge
API call.
The Amazon Pinpoint analytics metadata for collecting metrics for
- * RespondToAuthChallenge
calls.
The app client ID.
*/ - AnalyticsMetadata?: AnalyticsMetadataType; + ClientId: string | undefined; /** *The challenge name. For more information, see InitiateAuth.
@@ -8215,54 +8327,13 @@ export interface RespondToAuthChallengeRequest { ChallengeName: ChallengeNameType | string | undefined; /** - *A map of custom key-value pairs that you can provide as input for any custom workflows - * that this action triggers.
- *You create custom workflows by assigning AWS Lambda functions to user pool triggers.
- * When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions
- * that are assigned to the following triggers: post authentication,
- * pre token generation, define auth
- * challenge, create auth challenge, and
- * verify auth challenge. When Amazon Cognito invokes any of these
- * functions, it passes a JSON payload, which the function receives as input. This payload
- * contains a clientMetadata
attribute, which provides the data that you
- * assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your
- * function code in AWS Lambda, you can process the clientMetadata
value to
- * enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the - * Amazon Cognito Developer Guide.
- *Take the following limitations into consideration when you use the ClientMetadata - * parameter:
- *Amazon Cognito does not store the ClientMetadata value. This data is - * available only to AWS Lambda triggers that are assigned to a user pool to - * support custom workflows. If your user pool configuration does not include - * triggers, the ClientMetadata parameter serves no purpose.
- *Amazon Cognito does not validate the ClientMetadata value.
- *Amazon Cognito does not encrypt the the ClientMetadata value, so don't use - * it to provide sensitive information.
- *The app client ID.
- */ - ClientId: string | undefined; - - /** - *Contextual data such as the user's device fingerprint, IP address, or location used - * for evaluating the risk of an unexpected event by Amazon Cognito advanced - * security.
+ *The session which should be passed both ways in challenge-response calls to the
+ * service. If InitiateAuth
or RespondToAuthChallenge
API call
+ * determines that the caller needs to go through another challenge, they return a session
+ * with other challenge parameters. This session should be passed as it is to the next
+ * RespondToAuthChallenge
API call.
The challenge responses. These are inputs corresponding to the value of @@ -8307,6 +8378,57 @@ export interface RespondToAuthChallengeRequest { * */ ChallengeResponses?: { [key: string]: string }; + + /** + *
The Amazon Pinpoint analytics metadata for collecting metrics for
+ * RespondToAuthChallenge
calls.
Contextual data such as the user's device fingerprint, IP address, or location used + * for evaluating the risk of an unexpected event by Amazon Cognito advanced + * security.
+ */ + UserContextData?: UserContextDataType; + + /** + *A map of custom key-value pairs that you can provide as input for any custom workflows + * that this action triggers.
+ *You create custom workflows by assigning AWS Lambda functions to user pool triggers.
+ * When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions
+ * that are assigned to the following triggers: post authentication,
+ * pre token generation, define auth
+ * challenge, create auth challenge, and
+ * verify auth challenge. When Amazon Cognito invokes any of these
+ * functions, it passes a JSON payload, which the function receives as input. This payload
+ * contains a clientMetadata
attribute, which provides the data that you
+ * assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your
+ * function code in AWS Lambda, you can process the clientMetadata
value to
+ * enhance your workflow for your specific needs.
For more information, see Customizing User Pool Workflows with Lambda Triggers in the + * Amazon Cognito Developer Guide.
+ *Take the following limitations into consideration when you use the ClientMetadata + * parameter:
+ *Amazon Cognito does not store the ClientMetadata value. This data is + * available only to AWS Lambda triggers that are assigned to a user pool to + * support custom workflows. If your user pool configuration does not include + * triggers, the ClientMetadata parameter serves no purpose.
+ *Amazon Cognito does not validate the ClientMetadata value.
+ *Amazon Cognito does not encrypt the the ClientMetadata value, so don't use + * it to provide sensitive information.
+ *The response to respond to the authentication challenge.
- */ -export interface RespondToAuthChallengeResponse { - /** - *The challenge parameters. For more information, see InitiateAuth.
- */ - ChallengeParameters?: { [key: string]: string }; - + *The response to respond to the authentication challenge.
+ */ +export interface RespondToAuthChallengeResponse { /** - *The result returned by the server in response to the request to respond to the - * authentication challenge.
+ *The challenge name. For more information, see InitiateAuth.
*/ - AuthenticationResult?: AuthenticationResultType; + ChallengeName?: ChallengeNameType | string; /** *The session which should be passed both ways in challenge-response calls to the @@ -8341,9 +8457,15 @@ export interface RespondToAuthChallengeResponse { Session?: string; /** - *
The challenge name. For more information, see InitiateAuth.
+ *The challenge parameters. For more information, see InitiateAuth.
*/ - ChallengeName?: ChallengeNameType | string; + ChallengeParameters?: { [key: string]: string }; + + /** + *The result returned by the server in response to the request to respond to the + * authentication challenge.
+ */ + AuthenticationResult?: AuthenticationResultType; } export namespace RespondToAuthChallengeResponse { @@ -8357,14 +8479,19 @@ export namespace RespondToAuthChallengeResponse { export interface SetRiskConfigurationRequest { /** - *The configuration to override the risk decision.
+ *The user pool ID.
*/ - RiskExceptionConfiguration?: RiskExceptionConfigurationType; + UserPoolId: string | undefined; /** - *The user pool ID.
+ *The app client ID. If ClientId
is null, then the risk configuration is
+ * mapped to userPoolId
. When the client ID is null, the same risk
+ * configuration is applied to all the clients in the userPool.
Otherwise, ClientId
is mapped to the client. When the client ID is not
+ * null, the user pool configuration is overridden and the risk configuration for the
+ * client is used instead.
The compromised credentials risk configuration.
@@ -8377,14 +8504,9 @@ export interface SetRiskConfigurationRequest { AccountTakeoverRiskConfiguration?: AccountTakeoverRiskConfigurationType; /** - *The app client ID. If ClientId
is null, then the risk configuration is
- * mapped to userPoolId
. When the client ID is null, the same risk
- * configuration is applied to all the clients in the userPool.
Otherwise, ClientId
is mapped to the client. When the client ID is not
- * null, the user pool configuration is overridden and the risk configuration for the
- * client is used instead.
The configuration to override the risk decision.
*/ - ClientId?: string; + RiskExceptionConfiguration?: RiskExceptionConfigurationType; } export namespace SetRiskConfigurationRequest { @@ -8412,24 +8534,24 @@ export namespace SetRiskConfigurationResponse { export interface SetUICustomizationRequest { /** - *The uploaded logo image for the UI customization.
+ *The user pool ID for the user pool.
*/ - ImageFile?: Uint8Array; + UserPoolId: string | undefined; /** - *The CSS values in the UI customization.
+ *The client ID for the client app.
*/ - CSS?: string; + ClientId?: string; /** - *The client ID for the client app.
+ *The CSS values in the UI customization.
*/ - ClientId?: string; + CSS?: string; /** - *The user pool ID for the user pool.
+ *The uploaded logo image for the UI customization.
*/ - UserPoolId: string | undefined; + ImageFile?: Uint8Array; } export namespace SetUICustomizationRequest { @@ -8454,6 +8576,11 @@ export namespace SetUICustomizationResponse { } export interface SetUserMFAPreferenceRequest { + /** + *The SMS text message multi-factor authentication (MFA) settings.
+ */ + SMSMfaSettings?: SMSMfaSettingsType; + /** *The time-based one-time password software token MFA settings.
*/ @@ -8463,11 +8590,6 @@ export interface SetUserMFAPreferenceRequest { *The access token for the user.
*/ AccessToken: string | undefined; - - /** - *The SMS text message multi-factor authentication (MFA) settings.
- */ - SMSMfaSettings?: SMSMfaSettingsType; } export namespace SetUserMFAPreferenceRequest { @@ -8486,6 +8608,21 @@ export namespace SetUserMFAPreferenceResponse { } export interface SetUserPoolMfaConfigRequest { + /** + *The user pool ID.
+ */ + UserPoolId: string | undefined; + + /** + *The SMS text message MFA configuration.
+ */ + SmsMfaConfiguration?: SmsMfaConfigType; + + /** + *The software token MFA configuration.
+ */ + SoftwareTokenMfaConfiguration?: SoftwareTokenMfaConfigType; + /** *The MFA configuration. Valid values include:
*The user pool ID.
- */ - UserPoolId: string | undefined; - - /** - *The SMS text message MFA configuration.
- */ - SmsMfaConfiguration?: SmsMfaConfigType; - - /** - *The software token MFA configuration.
- */ - SoftwareTokenMfaConfiguration?: SoftwareTokenMfaConfigType; } export namespace SetUserPoolMfaConfigRequest { @@ -8530,14 +8652,14 @@ export namespace SetUserPoolMfaConfigRequest { export interface SetUserPoolMfaConfigResponse { /** - *The software token MFA configuration.
+ *The SMS text message MFA configuration.
*/ - SoftwareTokenMfaConfiguration?: SoftwareTokenMfaConfigType; + SmsMfaConfiguration?: SmsMfaConfigType; /** - *The SMS text message MFA configuration.
+ *The software token MFA configuration.
*/ - SmsMfaConfiguration?: SmsMfaConfigType; + SoftwareTokenMfaConfiguration?: SoftwareTokenMfaConfigType; /** *The MFA configuration. Valid values include:
@@ -8604,6 +8726,39 @@ export namespace SetUserSettingsResponse { *Represents the request to register a user.
*/ export interface SignUpRequest { + /** + *The ID of the client associated with the user pool.
+ */ + ClientId: string | undefined; + + /** + *A keyed-hash message authentication code (HMAC) calculated using the secret key of a + * user pool client and username plus the client ID in the message.
+ */ + SecretHash?: string; + + /** + *The user name of the user you wish to register.
+ */ + Username: string | undefined; + + /** + *The password of the user you wish to register.
+ */ + Password: string | undefined; + + /** + *An array of name-value pairs representing user attributes.
+ *For custom attributes, you must prepend the custom:
prefix to the
+ * attribute name.
The validation data in the request to register a user.
+ */ + ValidationData?: AttributeType[]; + /** *The Amazon Pinpoint analytics metadata for collecting metrics for SignUp
* calls.
The password of the user you wish to register.
+ *Contextual data such as the user's device fingerprint, IP address, or location used + * for evaluating the risk of an unexpected event by Amazon Cognito advanced + * security.
*/ - Password: string | undefined; + UserContextData?: UserContextDataType; /** *A map of custom key-value pairs that you can provide as input for any custom workflows @@ -8650,56 +8807,21 @@ export interface SignUpRequest { * */ ClientMetadata?: { [key: string]: string }; - - /** - *
An array of name-value pairs representing user attributes.
- *For custom attributes, you must prepend the custom:
prefix to the
- * attribute name.
A keyed-hash message authentication code (HMAC) calculated using the secret key of a - * user pool client and username plus the client ID in the message.
- */ - SecretHash?: string; - - /** - *Contextual data such as the user's device fingerprint, IP address, or location used - * for evaluating the risk of an unexpected event by Amazon Cognito advanced - * security.
- */ - UserContextData?: UserContextDataType; - - /** - *The user name of the user you wish to register.
- */ - Username: string | undefined; - - /** - *The validation data in the request to register a user.
- */ - ValidationData?: AttributeType[]; - - /** - *The ID of the client associated with the user pool.
- */ - ClientId: string | undefined; } export namespace SignUpRequest { export const filterSensitiveLog = (obj: SignUpRequest): any => ({ ...obj, + ...(obj.ClientId && { ClientId: SENSITIVE_STRING }), + ...(obj.SecretHash && { SecretHash: SENSITIVE_STRING }), + ...(obj.Username && { Username: SENSITIVE_STRING }), ...(obj.Password && { Password: SENSITIVE_STRING }), ...(obj.UserAttributes && { UserAttributes: obj.UserAttributes.map((item) => AttributeType.filterSensitiveLog(item)), }), - ...(obj.SecretHash && { SecretHash: SENSITIVE_STRING }), - ...(obj.Username && { Username: SENSITIVE_STRING }), ...(obj.ValidationData && { ValidationData: obj.ValidationData.map((item) => AttributeType.filterSensitiveLog(item)), }), - ...(obj.ClientId && { ClientId: SENSITIVE_STRING }), }); } @@ -8708,10 +8830,10 @@ export namespace SignUpRequest { */ export interface SignUpResponse { /** - *The UUID of the authenticated user. This is not the same as
- * username
.
A response from the server indicating that a user registration has been + * confirmed.
*/ - UserSub: string | undefined; + UserConfirmed: boolean | undefined; /** *The code delivery details returned by the server response to the user registration @@ -8720,10 +8842,10 @@ export interface SignUpResponse { CodeDeliveryDetails?: CodeDeliveryDetailsType; /** - *
A response from the server indicating that a user registration has been - * confirmed.
+ *The UUID of the authenticated user. This is not the same as
+ * username
.
The tags to assign to the user pool.
+ *The Amazon Resource Name (ARN) of the user pool to assign the tags to.
*/ - Tags: { [key: string]: string } | undefined; + ResourceArn: string | undefined; /** - *The Amazon Resource Name (ARN) of the user pool to assign the tags to.
+ *The tags to assign to the user pool.
*/ - ResourceArn: string | undefined; + Tags: { [key: string]: string } | undefined; } export namespace TagResourceRequest { @@ -8833,72 +8955,3 @@ export namespace TagResourceResponse { ...obj, }); } - -export interface UntagResourceRequest { - /** - *The keys of the tags to remove from the user pool.
- */ - TagKeys: string[] | undefined; - - /** - *The Amazon Resource Name (ARN) of the user pool that the tags are assigned to.
- */ - ResourceArn: string | undefined; -} - -export namespace UntagResourceRequest { - export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ - ...obj, - }); -} - -export interface UntagResourceResponse {} - -export namespace UntagResourceResponse { - export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ - ...obj, - }); -} - -export interface UpdateAuthEventFeedbackRequest { - /** - *The event ID.
- */ - EventId: string | undefined; - - /** - *The feedback token.
- */ - FeedbackToken: string | undefined; - - /** - *The authentication event feedback value.
- */ - FeedbackValue: FeedbackValueType | string | undefined; - - /** - *The user pool ID.
- */ - UserPoolId: string | undefined; - - /** - *The user pool username.
- */ - Username: string | undefined; -} - -export namespace UpdateAuthEventFeedbackRequest { - export const filterSensitiveLog = (obj: UpdateAuthEventFeedbackRequest): any => ({ - ...obj, - ...(obj.FeedbackToken && { FeedbackToken: SENSITIVE_STRING }), - ...(obj.Username && { Username: SENSITIVE_STRING }), - }); -} - -export interface UpdateAuthEventFeedbackResponse {} - -export namespace UpdateAuthEventFeedbackResponse { - export const filterSensitiveLog = (obj: UpdateAuthEventFeedbackResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-cognito-identity-provider/models/models_1.ts b/clients/client-cognito-identity-provider/models/models_1.ts index 824f1b8ea225..e82af2c4656d 100644 --- a/clients/client-cognito-identity-provider/models/models_1.ts +++ b/clients/client-cognito-identity-provider/models/models_1.ts @@ -9,6 +9,7 @@ import { DeviceRememberedStatusType, EmailConfigurationType, ExplicitAuthFlowsType, + FeedbackValueType, GroupType, IdentityProviderType, LambdaConfigType, @@ -28,10 +29,84 @@ import { import { SENSITIVE_STRING, SmithyException as __SmithyException } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer } from "@aws-sdk/types"; +export interface UntagResourceRequest { + /** + *The Amazon Resource Name (ARN) of the user pool that the tags are assigned to.
+ */ + ResourceArn: string | undefined; + + /** + *The keys of the tags to remove from the user pool.
+ */ + TagKeys: string[] | undefined; +} + +export namespace UntagResourceRequest { + export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ + ...obj, + }); +} + +export interface UntagResourceResponse {} + +export namespace UntagResourceResponse { + export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UpdateAuthEventFeedbackRequest { + /** + *The user pool ID.
+ */ + UserPoolId: string | undefined; + + /** + *The user pool username.
+ */ + Username: string | undefined; + + /** + *The event ID.
+ */ + EventId: string | undefined; + + /** + *The feedback token.
+ */ + FeedbackToken: string | undefined; + + /** + *The authentication event feedback value.
+ */ + FeedbackValue: FeedbackValueType | string | undefined; +} + +export namespace UpdateAuthEventFeedbackRequest { + export const filterSensitiveLog = (obj: UpdateAuthEventFeedbackRequest): any => ({ + ...obj, + ...(obj.Username && { Username: SENSITIVE_STRING }), + ...(obj.FeedbackToken && { FeedbackToken: SENSITIVE_STRING }), + }); +} + +export interface UpdateAuthEventFeedbackResponse {} + +export namespace UpdateAuthEventFeedbackResponse { + export const filterSensitiveLog = (obj: UpdateAuthEventFeedbackResponse): any => ({ + ...obj, + }); +} + /** *Represents the request to update the device status.
*/ export interface UpdateDeviceStatusRequest { + /** + *The access token.
+ */ + AccessToken: string | undefined; + /** *The device key.
*/ @@ -41,11 +116,6 @@ export interface UpdateDeviceStatusRequest { *The status of whether a device is remembered.
*/ DeviceRememberedStatus?: DeviceRememberedStatusType | string; - - /** - *The access token.
- */ - AccessToken: string | undefined; } export namespace UpdateDeviceStatusRequest { @@ -67,12 +137,6 @@ export namespace UpdateDeviceStatusResponse { } export interface UpdateGroupRequest { - /** - *The new precedence value for the group. For more information about this parameter, see - * CreateGroup.
- */ - Precedence?: number; - /** *The name of the group.
*/ @@ -83,6 +147,11 @@ export interface UpdateGroupRequest { */ UserPoolId: string | undefined; + /** + *A string containing the new description of the group.
+ */ + Description?: string; + /** *The new role ARN for the group. This is used for setting the
* cognito:roles
and cognito:preferred_role
claims in the
@@ -91,9 +160,10 @@ export interface UpdateGroupRequest {
RoleArn?: string;
/**
- *
A string containing the new description of the group.
+ *The new precedence value for the group. For more information about this parameter, see + * CreateGroup.
*/ - Description?: string; + Precedence?: number; } export namespace UpdateGroupRequest { @@ -117,25 +187,25 @@ export namespace UpdateGroupResponse { export interface UpdateIdentityProviderRequest { /** - *The identity provider details to be updated, such as MetadataURL
and
- * MetadataFile
.
The user pool ID.
*/ - ProviderDetails?: { [key: string]: string }; + UserPoolId: string | undefined; /** - *The user pool ID.
+ *The identity provider name.
*/ - UserPoolId: string | undefined; + ProviderName: string | undefined; /** - *The identity provider attribute mapping to be changed.
+ *The identity provider details to be updated, such as MetadataURL
and
+ * MetadataFile
.
The identity provider name.
+ *The identity provider attribute mapping to be changed.
*/ - ProviderName: string | undefined; + AttributeMapping?: { [key: string]: string }; /** *A list of identity provider identifiers.
@@ -164,9 +234,9 @@ export namespace UpdateIdentityProviderResponse { export interface UpdateResourceServerRequest { /** - *The scope values to be set for the resource server.
+ *The user pool ID for the user pool.
*/ - Scopes?: ResourceServerScopeType[]; + UserPoolId: string | undefined; /** *The identifier for the resource server.
@@ -174,14 +244,14 @@ export interface UpdateResourceServerRequest { Identifier: string | undefined; /** - *The user pool ID for the user pool.
+ *The name of the resource server.
*/ - UserPoolId: string | undefined; + Name: string | undefined; /** - *The name of the resource server.
+ *The scope values to be set for the resource server.
*/ - Name: string | undefined; + Scopes?: ResourceServerScopeType[]; } export namespace UpdateResourceServerRequest { @@ -207,6 +277,13 @@ export namespace UpdateResourceServerResponse { *Represents the request to update user attributes.
*/ export interface UpdateUserAttributesRequest { + /** + *An array of name-value pairs representing user attributes.
+ *For custom attributes, you must prepend the custom:
prefix to the
+ * attribute name.
The access token for the request to update user attributes.
*/ @@ -246,22 +323,15 @@ export interface UpdateUserAttributesRequest { * */ ClientMetadata?: { [key: string]: string }; - - /** - *An array of name-value pairs representing user attributes.
- *For custom attributes, you must prepend the custom:
prefix to the
- * attribute name.
The subject of the email verification message.
- */ - EmailVerificationSubject?: string; - - /** - *SMS configuration.
- */ - SmsConfiguration?: SmsConfigurationType; - - /** - *The template for verification messages.
- */ - VerificationMessageTemplate?: VerificationMessageTemplateType; - - /** - *Used to enable advanced security risk detection. Set the key
- * AdvancedSecurityMode
to the value "AUDIT".
Email configuration.
- */ - EmailConfiguration?: EmailConfigurationType; - - /** - *The configuration for AdminCreateUser
requests.
Use this setting to define which verified available method a user can use to recover
- * their password when they call ForgotPassword
. It allows you to define a
- * preferred method when a user has more than one method available. With this setting, SMS
- * does not qualify for a valid password recovery mechanism if the user also has SMS MFA
- * enabled. In the absence of this setting, Cognito uses the legacy behavior to determine
- * the recovery method where SMS is preferred over email.
The user pool ID for the user pool you want to update.
*/ - AccountRecoverySetting?: AccountRecoverySettingType; + UserPoolId: string | undefined; /** *A container with the policies you wish to update in a user pool.
@@ -334,16 +368,10 @@ export interface UpdateUserPoolRequest { Policies?: UserPoolPolicyType; /** - *The tag keys and values to assign to the user pool. A tag is a label that you can use - * to categorize and manage user pools in different ways, such as by purpose, owner, - * environment, or other criteria.
- */ - UserPoolTags?: { [key: string]: string }; - - /** - *The contents of the SMS authentication message.
+ *The AWS Lambda configuration information from the request to update the user + * pool.
*/ - SmsAuthenticationMessage?: string; + LambdaConfig?: LambdaConfigType; /** *The attributes that are automatically verified when the Amazon Cognito service makes a @@ -352,24 +380,29 @@ export interface UpdateUserPoolRequest { AutoVerifiedAttributes?: (VerifiedAttributeType | string)[]; /** - *
The user pool ID for the user pool you want to update.
+ *A container with information about the SMS verification message.
*/ - UserPoolId: string | undefined; + SmsVerificationMessage?: string; /** - *A container with information about the SMS verification message.
+ *The contents of the email verification message.
*/ - SmsVerificationMessage?: string; + EmailVerificationMessage?: string; /** - *Device configuration.
+ *The subject of the email verification message.
*/ - DeviceConfiguration?: DeviceConfigurationType; + EmailVerificationSubject?: string; /** - *The contents of the email verification message.
+ *The template for verification messages.
*/ - EmailVerificationMessage?: string; + VerificationMessageTemplate?: VerificationMessageTemplateType; + + /** + *The contents of the SMS authentication message.
+ */ + SmsAuthenticationMessage?: string; /** *Can be one of the following values:
@@ -394,10 +427,47 @@ export interface UpdateUserPoolRequest { MfaConfiguration?: UserPoolMfaType | string; /** - *The AWS Lambda configuration information from the request to update the user - * pool.
+ *Device configuration.
*/ - LambdaConfig?: LambdaConfigType; + DeviceConfiguration?: DeviceConfigurationType; + + /** + *Email configuration.
+ */ + EmailConfiguration?: EmailConfigurationType; + + /** + *SMS configuration.
+ */ + SmsConfiguration?: SmsConfigurationType; + + /** + *The tag keys and values to assign to the user pool. A tag is a label that you can use + * to categorize and manage user pools in different ways, such as by purpose, owner, + * environment, or other criteria.
+ */ + UserPoolTags?: { [key: string]: string }; + + /** + *The configuration for AdminCreateUser
requests.
Used to enable advanced security risk detection. Set the key
+ * AdvancedSecurityMode
to the value "AUDIT".
Use this setting to define which verified available method a user can use to recover
+ * their password when they call ForgotPassword
. It allows you to define a
+ * preferred method when a user has more than one method available. With this setting, SMS
+ * does not qualify for a valid password recovery mechanism if the user also has SMS MFA
+ * enabled. In the absence of this setting, Cognito uses the legacy behavior to determine
+ * the recovery method where SMS is preferred over email.
The time limit, after which the ID token is no longer valid and cannot be used.
+ *The user pool ID for the user pool where you want to update the user pool + * client.
*/ - IdTokenValidity?: number; + UserPoolId: string | undefined; /** - *A list of allowed redirect (callback) URLs for the identity providers.
- *A redirect URI must:
- *Be an absolute URI.
- *Be registered with the authorization server.
- *Not include a fragment component.
- *See OAuth 2.0 - - * Redirection Endpoint.
- *Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing - * purposes only.
- *App callback URLs such as myapp://example are also supported.
+ *The ID of the client associated with the user pool.
*/ - CallbackURLs?: string[]; + ClientId: string | undefined; /** - *A list of allowed logout URLs for the identity providers.
+ *The client name from the update user pool client request.
*/ - LogoutURLs?: string[]; + ClientName?: string; /** - *Set to true if the client is allowed to follow the OAuth protocol when interacting - * with Cognito user pools.
+ *The time limit, in days, after which the refresh token is no longer valid and cannot + * be used.
*/ - AllowedOAuthFlowsUserPoolClient?: boolean; + RefreshTokenValidity?: number; /** - *The user pool ID for the user pool where you want to update the user pool - * client.
+ *The time limit, after which the access token is no longer valid and cannot be used.
*/ - UserPoolId: string | undefined; + AccessTokenValidity?: number; /** - *The units in which the validity times are represented in. Default for RefreshToken is days, and default for ID and access tokens are hours.
+ *The time limit, after which the ID token is no longer valid and cannot be used.
*/ - TokenValidityUnits?: TokenValidityUnitsType; + IdTokenValidity?: number; /** - *A list of provider names for the identity providers that are supported on this - * client.
+ *The units in which the validity times are represented in. Default for RefreshToken is days, and default for ID and access tokens are hours.
*/ - SupportedIdentityProviders?: string[]; + TokenValidityUnits?: TokenValidityUnitsType; /** *The read-only attributes of the user pool.
@@ -483,9 +535,9 @@ export interface UpdateUserPoolClientRequest { ReadAttributes?: string[]; /** - *The time limit, after which the access token is no longer valid and cannot be used.
+ *The writeable attributes of the user pool.
*/ - AccessTokenValidity?: number; + WriteAttributes?: string[]; /** *The authentication flows that are supported by the user pool clients. Flow names @@ -527,9 +579,37 @@ export interface UpdateUserPoolClientRequest { ExplicitAuthFlows?: (ExplicitAuthFlowsType | string)[]; /** - *
The writeable attributes of the user pool.
+ *A list of provider names for the identity providers that are supported on this + * client.
*/ - WriteAttributes?: string[]; + SupportedIdentityProviders?: string[]; + + /** + *A list of allowed redirect (callback) URLs for the identity providers.
+ *A redirect URI must:
+ *Be an absolute URI.
+ *Be registered with the authorization server.
+ *Not include a fragment component.
+ *See OAuth 2.0 - + * Redirection Endpoint.
+ *Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing + * purposes only.
+ *App callback URLs such as myapp://example are also supported.
+ */ + CallbackURLs?: string[]; + + /** + *A list of allowed logout URLs for the identity providers.
+ */ + LogoutURLs?: string[]; /** *The default redirect URI. Must be in the CallbackURLs
list.
The ID of the client associated with the user pool.
+ *The allowed OAuth flows.
+ *Set to code
to initiate a code grant flow, which provides an
+ * authorization code as the response. This code can be exchanged for access tokens with
+ * the token endpoint.
Set to implicit
to specify that the client should get the access token
+ * (and, optionally, ID token, based on scopes) directly.
Set to client_credentials
to specify that the client should get the
+ * access token (and, optionally, ID token, based on scopes) from the token endpoint using
+ * a combination of client and client_secret.
The allowed OAuth scopes. Possible values provided by OAuth are: phone
,
@@ -567,9 +655,22 @@ export interface UpdateUserPoolClientRequest {
AllowedOAuthScopes?: string[];
/**
- *
The client name from the update user pool client request.
+ *Set to true if the client is allowed to follow the OAuth protocol when interacting + * with Cognito user pools.
*/ - ClientName?: string; + AllowedOAuthFlowsUserPoolClient?: boolean; + + /** + *The Amazon Pinpoint analytics configuration for collecting metrics for this user + * pool.
+ *In regions where Pinpoint is not available, Cognito User Pools only supports sending events to Amazon Pinpoint projects in us-east-1. + * In regions where Pinpoint is available, Cognito User Pools will + * support sending events to Amazon Pinpoint projects within that same region. + *
+ *Use this setting to choose which errors and responses are returned by Cognito APIs @@ -601,37 +702,6 @@ export interface UpdateUserPoolClientRequest { * */ PreventUserExistenceErrors?: PreventUserExistenceErrorTypes | string; - - /** - *
The allowed OAuth flows.
- *Set to code
to initiate a code grant flow, which provides an
- * authorization code as the response. This code can be exchanged for access tokens with
- * the token endpoint.
Set to implicit
to specify that the client should get the access token
- * (and, optionally, ID token, based on scopes) directly.
Set to client_credentials
to specify that the client should get the
- * access token (and, optionally, ID token, based on scopes) from the token endpoint using
- * a combination of client and client_secret.
The time limit, in days, after which the refresh token is no longer valid and cannot - * be used.
- */ - RefreshTokenValidity?: number; - - /** - *The Amazon Pinpoint analytics configuration for collecting metrics for this user - * pool.
- *In regions where Pinpoint is not available, Cognito User Pools only supports sending events to Amazon Pinpoint projects in us-east-1. - * In regions where Pinpoint is available, Cognito User Pools will - * support sending events to Amazon Pinpoint projects within that same region. - *
- *The UpdateUserPoolDomain request input.
*/ export interface UpdateUserPoolDomainRequest { - /** - *The configuration for a custom domain that hosts the sign-up and sign-in pages for - * your application. Use this object to specify an SSL certificate that is managed by - * ACM.
- */ - CustomDomainConfig: CustomDomainConfigType | undefined; - /** *The domain name for the custom domain that hosts the sign-up and sign-in pages for
* your application. For example: auth.example.com
.
The configuration for a custom domain that hosts the sign-up and sign-in pages for + * your application. Use this object to specify an SSL certificate that is managed by + * ACM.
+ */ + CustomDomainConfig: CustomDomainConfigType | undefined; } export namespace UpdateUserPoolDomainRequest { @@ -738,14 +808,14 @@ export interface VerifySoftwareTokenRequest { Session?: string; /** - *The friendly device name.
+ *The one time password computed using the secret code returned by AssociateSoftwareToken".
*/ - FriendlyDeviceName?: string; + UserCode: string | undefined; /** - *The one time password computed using the secret code returned by AssociateSoftwareToken".
+ *The friendly device name.
*/ - UserCode: string | undefined; + FriendlyDeviceName?: string; } export namespace VerifySoftwareTokenRequest { @@ -762,15 +832,15 @@ export enum VerifySoftwareTokenResponseType { export interface VerifySoftwareTokenResponse { /** - *The session which should be passed both ways in challenge-response calls to the - * service.
+ *The status of the verify software token.
*/ - Session?: string; + Status?: VerifySoftwareTokenResponseType | string; /** - *The status of the verify software token.
+ *The session which should be passed both ways in challenge-response calls to the + * service.
*/ - Status?: VerifySoftwareTokenResponseType | string; + Session?: string; } export namespace VerifySoftwareTokenResponse { @@ -789,14 +859,14 @@ export interface VerifyUserAttributeRequest { AccessToken: string | undefined; /** - *The verification code in the request to verify user attributes.
+ *The attribute name in the request to verify user attributes.
*/ - Code: string | undefined; + AttributeName: string | undefined; /** - *The attribute name in the request to verify user attributes.
+ *The verification code in the request to verify user attributes.
*/ - AttributeName: string | undefined; + Code: string | undefined; } export namespace VerifyUserAttributeRequest { diff --git a/clients/client-cognito-identity-provider/protocols/Aws_json1_1.ts b/clients/client-cognito-identity-provider/protocols/Aws_json1_1.ts index 7ace6674917f..e024cf6ee8e3 100644 --- a/clients/client-cognito-identity-provider/protocols/Aws_json1_1.ts +++ b/clients/client-cognito-identity-provider/protocols/Aws_json1_1.ts @@ -362,6 +362,8 @@ import { CreateUserPoolRequest, CreateUserPoolResponse, CustomDomainConfigType, + CustomEmailLambdaVersionConfigType, + CustomSMSLambdaVersionConfigType, DeleteGroupRequest, DeleteIdentityProviderRequest, DeleteResourceServerRequest, @@ -517,10 +519,6 @@ import { UnexpectedLambdaException, UnsupportedIdentityProviderException, UnsupportedUserStateException, - UntagResourceRequest, - UntagResourceResponse, - UpdateAuthEventFeedbackRequest, - UpdateAuthEventFeedbackResponse, UserContextDataType, UserImportInProgressException, UserImportJobType, @@ -544,6 +542,10 @@ import { } from "../models/models_0"; import { EnableSoftwareTokenMFAException, + UntagResourceRequest, + UntagResourceResponse, + UpdateAuthEventFeedbackRequest, + UpdateAuthEventFeedbackResponse, UpdateDeviceStatusRequest, UpdateDeviceStatusResponse, UpdateGroupRequest, @@ -14061,6 +14063,26 @@ const serializeAws_json1_1CustomDomainConfigType = (input: CustomDomainConfigTyp }; }; +const serializeAws_json1_1CustomEmailLambdaVersionConfigType = ( + input: CustomEmailLambdaVersionConfigType, + context: __SerdeContext +): any => { + return { + ...(input.LambdaArn !== undefined && { LambdaArn: input.LambdaArn }), + ...(input.LambdaVersion !== undefined && { LambdaVersion: input.LambdaVersion }), + }; +}; + +const serializeAws_json1_1CustomSMSLambdaVersionConfigType = ( + input: CustomSMSLambdaVersionConfigType, + context: __SerdeContext +): any => { + return { + ...(input.LambdaArn !== undefined && { LambdaArn: input.LambdaArn }), + ...(input.LambdaVersion !== undefined && { LambdaVersion: input.LambdaVersion }), + }; +}; + const serializeAws_json1_1DeleteGroupRequest = (input: DeleteGroupRequest, context: __SerdeContext): any => { return { ...(input.GroupName !== undefined && { GroupName: input.GroupName }), @@ -14390,8 +14412,15 @@ const serializeAws_json1_1InitiateAuthRequest = (input: InitiateAuthRequest, con const serializeAws_json1_1LambdaConfigType = (input: LambdaConfigType, context: __SerdeContext): any => { return { ...(input.CreateAuthChallenge !== undefined && { CreateAuthChallenge: input.CreateAuthChallenge }), + ...(input.CustomEmailSender !== undefined && { + CustomEmailSender: serializeAws_json1_1CustomEmailLambdaVersionConfigType(input.CustomEmailSender, context), + }), ...(input.CustomMessage !== undefined && { CustomMessage: input.CustomMessage }), + ...(input.CustomSMSSender !== undefined && { + CustomSMSSender: serializeAws_json1_1CustomSMSLambdaVersionConfigType(input.CustomSMSSender, context), + }), ...(input.DefineAuthChallenge !== undefined && { DefineAuthChallenge: input.DefineAuthChallenge }), + ...(input.KMSKeyID !== undefined && { KMSKeyID: input.KMSKeyID }), ...(input.PostAuthentication !== undefined && { PostAuthentication: input.PostAuthentication }), ...(input.PostConfirmation !== undefined && { PostConfirmation: input.PostConfirmation }), ...(input.PreAuthentication !== undefined && { PreAuthentication: input.PreAuthentication }), @@ -15857,6 +15886,28 @@ const deserializeAws_json1_1CustomDomainConfigType = (output: any, context: __Se } as 
any; }; +const deserializeAws_json1_1CustomEmailLambdaVersionConfigType = ( + output: any, + context: __SerdeContext +): CustomEmailLambdaVersionConfigType => { + return { + LambdaArn: output.LambdaArn !== undefined && output.LambdaArn !== null ? output.LambdaArn : undefined, + LambdaVersion: + output.LambdaVersion !== undefined && output.LambdaVersion !== null ? output.LambdaVersion : undefined, + } as any; +}; + +const deserializeAws_json1_1CustomSMSLambdaVersionConfigType = ( + output: any, + context: __SerdeContext +): CustomSMSLambdaVersionConfigType => { + return { + LambdaArn: output.LambdaArn !== undefined && output.LambdaArn !== null ? output.LambdaArn : undefined, + LambdaVersion: + output.LambdaVersion !== undefined && output.LambdaVersion !== null ? output.LambdaVersion : undefined, + } as any; +}; + const deserializeAws_json1_1DeleteUserAttributesResponse = ( output: any, context: __SerdeContext @@ -16387,12 +16438,21 @@ const deserializeAws_json1_1LambdaConfigType = (output: any, context: __SerdeCon output.CreateAuthChallenge !== undefined && output.CreateAuthChallenge !== null ? output.CreateAuthChallenge : undefined, + CustomEmailSender: + output.CustomEmailSender !== undefined && output.CustomEmailSender !== null + ? deserializeAws_json1_1CustomEmailLambdaVersionConfigType(output.CustomEmailSender, context) + : undefined, CustomMessage: output.CustomMessage !== undefined && output.CustomMessage !== null ? output.CustomMessage : undefined, + CustomSMSSender: + output.CustomSMSSender !== undefined && output.CustomSMSSender !== null + ? deserializeAws_json1_1CustomSMSLambdaVersionConfigType(output.CustomSMSSender, context) + : undefined, DefineAuthChallenge: output.DefineAuthChallenge !== undefined && output.DefineAuthChallenge !== null ? output.DefineAuthChallenge : undefined, + KMSKeyID: output.KMSKeyID !== undefined && output.KMSKeyID !== null ? output.KMSKeyID : undefined, PostAuthentication: output.PostAuthentication !== undefined && output.PostAuthentication !== null ? 
output.PostAuthentication diff --git a/clients/client-comprehend/Comprehend.ts b/clients/client-comprehend/Comprehend.ts index 65448b9f9023..6cb9819ef3b3 100644 --- a/clients/client-comprehend/Comprehend.ts +++ b/clients/client-comprehend/Comprehend.ts @@ -89,6 +89,11 @@ import { DescribeEntityRecognizerCommandInput, DescribeEntityRecognizerCommandOutput, } from "./commands/DescribeEntityRecognizerCommand"; +import { + DescribeEventsDetectionJobCommand, + DescribeEventsDetectionJobCommandInput, + DescribeEventsDetectionJobCommandOutput, +} from "./commands/DescribeEventsDetectionJobCommand"; import { DescribeKeyPhrasesDetectionJobCommand, DescribeKeyPhrasesDetectionJobCommandInput, @@ -169,6 +174,11 @@ import { ListEntityRecognizersCommandInput, ListEntityRecognizersCommandOutput, } from "./commands/ListEntityRecognizersCommand"; +import { + ListEventsDetectionJobsCommand, + ListEventsDetectionJobsCommandInput, + ListEventsDetectionJobsCommandOutput, +} from "./commands/ListEventsDetectionJobsCommand"; import { ListKeyPhrasesDetectionJobsCommand, ListKeyPhrasesDetectionJobsCommandInput, @@ -209,6 +219,11 @@ import { StartEntitiesDetectionJobCommandInput, StartEntitiesDetectionJobCommandOutput, } from "./commands/StartEntitiesDetectionJobCommand"; +import { + StartEventsDetectionJobCommand, + StartEventsDetectionJobCommandInput, + StartEventsDetectionJobCommandOutput, +} from "./commands/StartEventsDetectionJobCommand"; import { StartKeyPhrasesDetectionJobCommand, StartKeyPhrasesDetectionJobCommandInput, @@ -239,6 +254,11 @@ import { StopEntitiesDetectionJobCommandInput, StopEntitiesDetectionJobCommandOutput, } from "./commands/StopEntitiesDetectionJobCommand"; +import { + StopEventsDetectionJobCommand, + StopEventsDetectionJobCommandInput, + StopEventsDetectionJobCommandOutput, +} from "./commands/StopEventsDetectionJobCommand"; import { StopKeyPhrasesDetectionJobCommand, StopKeyPhrasesDetectionJobCommandInput, @@ -893,6 +913,38 @@ export class Comprehend extends ComprehendClient { } } + /** + *Gets the status and details of an events detection job.
+ */ + public describeEventsDetectionJob( + args: DescribeEventsDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): PromiseGets the properties associated with a key phrases detection job. Use this operation to get * the status of a detection job.
@@ -1416,6 +1468,38 @@ export class Comprehend extends ComprehendClient { } } + /** + *Gets a list of the events detection jobs that you have submitted.
+ */ + public listEventsDetectionJobs( + args: ListEventsDetectionJobsCommandInput, + options?: __HttpHandlerOptions + ): PromiseGet a list of key phrase detection jobs that you have submitted.
*/ @@ -1679,6 +1763,38 @@ export class Comprehend extends ComprehendClient { } } + /** + *Starts an asynchronous event detection job for a collection of documents.
+ */ + public startEventsDetectionJob( + args: StartEventsDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): PromiseStarts an asynchronous key phrase detection job for a collection of documents. Use the * operation to track the status of a @@ -1894,6 +2010,38 @@ export class Comprehend extends ComprehendClient { } } + /** + *
Stops an events detection job in progress.
+ */ + public stopEventsDetectionJob( + args: StopEventsDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): PromiseStops a key phrases detection job in progress.
*If the job state is IN_PROGRESS
the job is marked for termination and put
diff --git a/clients/client-comprehend/ComprehendClient.ts b/clients/client-comprehend/ComprehendClient.ts
index 37b375997e34..6ce8ada5395b 100644
--- a/clients/client-comprehend/ComprehendClient.ts
+++ b/clients/client-comprehend/ComprehendClient.ts
@@ -55,6 +55,10 @@ import {
DescribeEntityRecognizerCommandInput,
DescribeEntityRecognizerCommandOutput,
} from "./commands/DescribeEntityRecognizerCommand";
+import {
+ DescribeEventsDetectionJobCommandInput,
+ DescribeEventsDetectionJobCommandOutput,
+} from "./commands/DescribeEventsDetectionJobCommand";
import {
DescribeKeyPhrasesDetectionJobCommandInput,
DescribeKeyPhrasesDetectionJobCommandOutput,
@@ -101,6 +105,10 @@ import {
ListEntityRecognizersCommandInput,
ListEntityRecognizersCommandOutput,
} from "./commands/ListEntityRecognizersCommand";
+import {
+ ListEventsDetectionJobsCommandInput,
+ ListEventsDetectionJobsCommandOutput,
+} from "./commands/ListEventsDetectionJobsCommand";
import {
ListKeyPhrasesDetectionJobsCommandInput,
ListKeyPhrasesDetectionJobsCommandOutput,
@@ -133,6 +141,10 @@ import {
StartEntitiesDetectionJobCommandInput,
StartEntitiesDetectionJobCommandOutput,
} from "./commands/StartEntitiesDetectionJobCommand";
+import {
+ StartEventsDetectionJobCommandInput,
+ StartEventsDetectionJobCommandOutput,
+} from "./commands/StartEventsDetectionJobCommand";
import {
StartKeyPhrasesDetectionJobCommandInput,
StartKeyPhrasesDetectionJobCommandOutput,
@@ -157,6 +169,10 @@ import {
StopEntitiesDetectionJobCommandInput,
StopEntitiesDetectionJobCommandOutput,
} from "./commands/StopEntitiesDetectionJobCommand";
+import {
+ StopEventsDetectionJobCommandInput,
+ StopEventsDetectionJobCommandOutput,
+} from "./commands/StopEventsDetectionJobCommand";
import {
StopKeyPhrasesDetectionJobCommandInput,
StopKeyPhrasesDetectionJobCommandOutput,
@@ -248,6 +264,7 @@ export type ServiceInputTypes =
| DescribeEndpointCommandInput
| DescribeEntitiesDetectionJobCommandInput
| DescribeEntityRecognizerCommandInput
+ | DescribeEventsDetectionJobCommandInput
| DescribeKeyPhrasesDetectionJobCommandInput
| DescribePiiEntitiesDetectionJobCommandInput
| DescribeSentimentDetectionJobCommandInput
@@ -264,6 +281,7 @@ export type ServiceInputTypes =
| ListEndpointsCommandInput
| ListEntitiesDetectionJobsCommandInput
| ListEntityRecognizersCommandInput
+ | ListEventsDetectionJobsCommandInput
| ListKeyPhrasesDetectionJobsCommandInput
| ListPiiEntitiesDetectionJobsCommandInput
| ListSentimentDetectionJobsCommandInput
@@ -272,12 +290,14 @@ export type ServiceInputTypes =
| StartDocumentClassificationJobCommandInput
| StartDominantLanguageDetectionJobCommandInput
| StartEntitiesDetectionJobCommandInput
+ | StartEventsDetectionJobCommandInput
| StartKeyPhrasesDetectionJobCommandInput
| StartPiiEntitiesDetectionJobCommandInput
| StartSentimentDetectionJobCommandInput
| StartTopicsDetectionJobCommandInput
| StopDominantLanguageDetectionJobCommandInput
| StopEntitiesDetectionJobCommandInput
+ | StopEventsDetectionJobCommandInput
| StopKeyPhrasesDetectionJobCommandInput
| StopPiiEntitiesDetectionJobCommandInput
| StopSentimentDetectionJobCommandInput
@@ -306,6 +326,7 @@ export type ServiceOutputTypes =
| DescribeEndpointCommandOutput
| DescribeEntitiesDetectionJobCommandOutput
| DescribeEntityRecognizerCommandOutput
+ | DescribeEventsDetectionJobCommandOutput
| DescribeKeyPhrasesDetectionJobCommandOutput
| DescribePiiEntitiesDetectionJobCommandOutput
| DescribeSentimentDetectionJobCommandOutput
@@ -322,6 +343,7 @@ export type ServiceOutputTypes =
| ListEndpointsCommandOutput
| ListEntitiesDetectionJobsCommandOutput
| ListEntityRecognizersCommandOutput
+ | ListEventsDetectionJobsCommandOutput
| ListKeyPhrasesDetectionJobsCommandOutput
| ListPiiEntitiesDetectionJobsCommandOutput
| ListSentimentDetectionJobsCommandOutput
@@ -330,12 +352,14 @@ export type ServiceOutputTypes =
| StartDocumentClassificationJobCommandOutput
| StartDominantLanguageDetectionJobCommandOutput
| StartEntitiesDetectionJobCommandOutput
+ | StartEventsDetectionJobCommandOutput
| StartKeyPhrasesDetectionJobCommandOutput
| StartPiiEntitiesDetectionJobCommandOutput
| StartSentimentDetectionJobCommandOutput
| StartTopicsDetectionJobCommandOutput
| StopDominantLanguageDetectionJobCommandOutput
| StopEntitiesDetectionJobCommandOutput
+ | StopEventsDetectionJobCommandOutput
| StopKeyPhrasesDetectionJobCommandOutput
| StopPiiEntitiesDetectionJobCommandOutput
| StopSentimentDetectionJobCommandOutput
diff --git a/clients/client-comprehend/commands/DescribeEventsDetectionJobCommand.ts b/clients/client-comprehend/commands/DescribeEventsDetectionJobCommand.ts
new file mode 100644
index 000000000000..cea6dabf13c7
--- /dev/null
+++ b/clients/client-comprehend/commands/DescribeEventsDetectionJobCommand.ts
@@ -0,0 +1,91 @@
+import { ComprehendClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComprehendClient";
+import { DescribeEventsDetectionJobRequest, DescribeEventsDetectionJobResponse } from "../models/models_0";
+import {
+ deserializeAws_json1_1DescribeEventsDetectionJobCommand,
+ serializeAws_json1_1DescribeEventsDetectionJobCommand,
+} from "../protocols/Aws_json1_1";
+import { getSerdePlugin } from "@aws-sdk/middleware-serde";
+import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
+import { Command as $Command } from "@aws-sdk/smithy-client";
+import {
+ FinalizeHandlerArguments,
+ Handler,
+ HandlerExecutionContext,
+ MiddlewareStack,
+ HttpHandlerOptions as __HttpHandlerOptions,
+ MetadataBearer as __MetadataBearer,
+ SerdeContext as __SerdeContext,
+} from "@aws-sdk/types";
+
+export type DescribeEventsDetectionJobCommandInput = DescribeEventsDetectionJobRequest;
+export type DescribeEventsDetectionJobCommandOutput = DescribeEventsDetectionJobResponse & __MetadataBearer;
+
+/**
+ *
Gets the status and details of an events detection job.
+ */ +export class DescribeEventsDetectionJobCommand extends $Command< + DescribeEventsDetectionJobCommandInput, + DescribeEventsDetectionJobCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeEventsDetectionJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackGets a list of the events detection jobs that you have submitted.
+ */
+export class ListEventsDetectionJobsCommand extends $Command<
+  ListEventsDetectionJobsCommandInput,
+  ListEventsDetectionJobsCommandOutput,
+  ComprehendClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListEventsDetectionJobsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
Starts an asynchronous event detection job for a collection of documents.
+ */
+export class StartEventsDetectionJobCommand extends $Command<
+  StartEventsDetectionJobCommandInput,
+  StartEventsDetectionJobCommandOutput,
+  ComprehendClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: StartEventsDetectionJobCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
Stops an events detection job in progress.
+ */
+export class StopEventsDetectionJobCommand extends $Command<
+  StopEventsDetectionJobCommandInput,
+  StopEventsDetectionJobCommandOutput,
+  ComprehendClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: StopEventsDetectionJobCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
The Amazon S3 location of the augmented manifest file.
+ */ + S3Uri: string | undefined; + /** *The JSON attribute that contains the annotations for your training documents. The number * of attribute names that you specify depends on whether your augmented manifest file is the @@ -17,11 +22,6 @@ export interface AugmentedManifestsListItem { * an individual job.
*/ AttributeNames: string[] | undefined; - - /** - *The Amazon S3 location of the augmented manifest file.
- */ - S3Uri: string | undefined; } export namespace AugmentedManifestsListItem { @@ -52,6 +52,11 @@ export namespace BatchDetectDominantLanguageRequest { * error. */ export interface BatchItemError { + /** + *The zero-based index of the document in the input list.
+ */ + Index?: number; + /** *The numeric error code of the error.
*/ @@ -61,11 +66,6 @@ export interface BatchItemError { *A text description of the error.
*/ ErrorMessage?: string; - - /** - *The zero-based index of the document in the input list.
- */ - Index?: number; } export namespace BatchItemError { @@ -79,18 +79,18 @@ export namespace BatchItemError { * confidence that Amazon Comprehend has in the accuracy of the detection. */ export interface DominantLanguage { - /** - *The level of confidence that Amazon Comprehend has in the accuracy of the - * detection.
- */ - Score?: number; - /** *The RFC 5646 language code for the dominant language. For more information about RFC * 5646, see Tags for Identifying * Languages on the IETF Tools web site.
*/ LanguageCode?: string; + + /** + *The level of confidence that Amazon Comprehend has in the accuracy of the + * detection.
+ */ + Score?: number; } export namespace DominantLanguage { @@ -106,15 +106,15 @@ export namespace DominantLanguage { */ export interface BatchDetectDominantLanguageItemResult { /** - *One or more DominantLanguage objects describing the dominant - * languages in the document.
+ *The zero-based index of the document in the input list.
*/ - Languages?: DominantLanguage[]; + Index?: number; /** - *The zero-based index of the document in the input list.
+ *One or more DominantLanguage objects describing the dominant + * languages in the document.
*/ - Index?: number; + Languages?: DominantLanguage[]; } export namespace BatchDetectDominantLanguageItemResult { @@ -124,14 +124,6 @@ export namespace BatchDetectDominantLanguageItemResult { } export interface BatchDetectDominantLanguageResponse { - /** - *A list containing one object for each document
- * that contained an error. The results are sorted in ascending order by the Index
- * field and match the order of the documents in the input list. If there are no errors in the
- * batch, the ErrorList
is empty.
A list of objects
* containing the results of the operation. The results are sorted in ascending order by the
@@ -139,6 +131,14 @@ export interface BatchDetectDominantLanguageResponse {
* the documents contain an error, the ResultList
is empty.
A list containing one object for each document
+ * that contained an error. The results are sorted in ascending order by the Index
+ * field and match the order of the documents in the input list. If there are no errors in the
+ * batch, the ErrorList
is empty.
The language of the input documents. You can specify any of the primary languages - * supported by Amazon Comprehend. All documents must be in the same language.
- */ - LanguageCode: LanguageCode | string | undefined; - /** *A list containing the text of the input documents. The list can contain a maximum of 25 * documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded * characters.
*/ TextList: string[] | undefined; + + /** + *The language of the input documents. You can specify any of the primary languages + * supported by Amazon Comprehend. All documents must be in the same language.
+ */ + LanguageCode: LanguageCode | string | undefined; } export namespace BatchDetectEntitiesRequest { @@ -263,17 +263,10 @@ export enum EntityType { */ export interface Entity { /** - *A character offset in the input text that shows where the entity ends. The offset - * returns the position of each UTF-8 code point in the string. A code point - * is the abstract character from a particular graphical representation. For example, a - * multi-byte UTF-8 character maps to a single code point.
- */ - EndOffset?: number; - - /** - *The text of the entity.
+ *The level of confidence that Amazon Comprehend has in the accuracy of the + * detection.
*/ - Text?: string; + Score?: number; /** *The entity's type.
@@ -281,10 +274,9 @@ export interface Entity { Type?: EntityType | string; /** - *The level of confidence that Amazon Comprehend has in the accuracy of the - * detection.
+ *The text of the entity.
*/ - Score?: number; + Text?: string; /** *A character offset in the input text that shows where the entity begins (the first @@ -294,6 +286,14 @@ export interface Entity { * point.
*/ BeginOffset?: number; + + /** + *A character offset in the input text that shows where the entity ends. The offset + * returns the position of each UTF-8 code point in the string. A code point + * is the abstract character from a particular graphical representation. For example, a + * multi-byte UTF-8 character maps to a single code point.
+ */ + EndOffset?: number; } export namespace Entity { @@ -396,12 +396,15 @@ export namespace BatchDetectKeyPhrasesRequest { */ export interface KeyPhrase { /** - *A character offset in the input text where the key phrase ends. The offset returns the
- * position of each UTF-8 code point in the string. A code point
is the abstract
- * character from a particular graphical representation. For example, a multi-byte UTF-8
- * character maps to a single code point.
The level of confidence that Amazon Comprehend has in the accuracy of the + * detection.
*/ - EndOffset?: number; + Score?: number; + + /** + *The text of a key noun phrase.
+ */ + Text?: string; /** *A character offset in the input text that shows where the key phrase begins (the first @@ -413,15 +416,12 @@ export interface KeyPhrase { BeginOffset?: number; /** - *
The text of a key noun phrase.
- */ - Text?: string; - - /** - *The level of confidence that Amazon Comprehend has in the accuracy of the - * detection.
+ *A character offset in the input text where the key phrase ends. The offset returns the
+ * position of each UTF-8 code point in the string. A code point
is the abstract
+ * character from a particular graphical representation. For example, a multi-byte UTF-8
+ * character maps to a single code point.
The level of confidence that Amazon Comprehend has in the accuracy of its detection of
- * the MIXED
sentiment.
The level of confidence that Amazon Comprehend has in the accuracy of its detection of
* the POSITIVE
sentiment.
NEUTRAL
sentiment.
*/
Neutral?: number;
+
+ /**
+ * The level of confidence that Amazon Comprehend has in the accuracy of its detection of
+ * the MIXED
sentiment.
The zero-based index of the document in the input list.
+ */ + Index?: number; + /** *The sentiment detected in the document.
*/ @@ -559,11 +564,6 @@ export interface BatchDetectSentimentItemResult { * detection. */ SentimentScore?: SentimentScore; - - /** - *The zero-based index of the document in the input list.
- */ - Index?: number; } export namespace BatchDetectSentimentItemResult { @@ -573,14 +573,6 @@ export namespace BatchDetectSentimentItemResult { } export interface BatchDetectSentimentResponse { - /** - *A list containing one object for each document
- * that contained an error. The results are sorted in ascending order by the Index
- * field and match the order of the documents in the input list. If there are no errors in the
- * batch, the ErrorList
is empty.
A list of objects containing the
* results of the operation. The results are sorted in ascending order by the Index
@@ -588,6 +580,14 @@ export interface BatchDetectSentimentResponse {
* an error, the ResultList
is empty.
A list containing one object for each document
+ * that contained an error. The results are sorted in ascending order by the Index
+ * field and match the order of the documents in the input list. If there are no errors in the
+ * batch, the ErrorList
is empty.
The confidence that Amazon Comprehend has that the part of speech was correctly - * identified.
+ *Identifies the part of speech that the token represents.
*/ - Score?: number; + Tag?: PartOfSpeechTagType | string; /** - *Identifies the part of speech that the token represents.
+ *The confidence that Amazon Comprehend has that the part of speech was correctly + * identified.
*/ - Tag?: PartOfSpeechTagType | string; + Score?: number; } export namespace PartOfSpeechTag { @@ -678,6 +678,11 @@ export namespace PartOfSpeechTag { * There is one syntax token record for each word in the source text. */ export interface SyntaxToken { + /** + *A unique identifier for a token.
+ */ + TokenId?: number; + /** *The word that was recognized in the source text.
*/ @@ -689,11 +694,6 @@ export interface SyntaxToken { */ BeginOffset?: number; - /** - *A unique identifier for a token.
- */ - TokenId?: number; - /** *The zero-based offset from the beginning of the source text to the last character in the * word.
@@ -719,14 +719,14 @@ export namespace SyntaxToken { */ export interface BatchDetectSyntaxItemResult { /** - *The syntax tokens for the words in the document, one token for each word.
+ *The zero-based index of the document in the input list.
*/ - SyntaxTokens?: SyntaxToken[]; + Index?: number; /** - *The zero-based index of the document in the input list.
+ *The syntax tokens for the words in the document, one token for each word.
*/ - Index?: number; + SyntaxTokens?: SyntaxToken[]; } export namespace BatchDetectSyntaxItemResult { @@ -736,14 +736,6 @@ export namespace BatchDetectSyntaxItemResult { } export interface BatchDetectSyntaxResponse { - /** - *A list containing one object for each document that
- * contained an error. The results are sorted in ascending order by the Index
field
- * and match the order of the documents in the input list. If there are no errors in the batch,
- * the ErrorList
is empty.
A list of objects containing the results
* of the operation. The results are sorted in ascending order by the Index
field
@@ -751,6 +743,14 @@ export interface BatchDetectSyntaxResponse {
* error, the ResultList
is empty.
A list containing one object for each document that
+ * contained an error. The results are sorted in ascending order by the Index
field
+ * and match the order of the documents in the input list. If there are no errors in the batch,
+ * the ErrorList
is empty.
A measure of the usefulness of the recognizer results in the test data. High precision - * means that the recognizer returned substantially more relevant results than irrelevant ones. - * Unlike the Precision metric which comes from averaging the precision of all available labels, - * this is based on the overall score of all precision scores added together.
- */ - MicroPrecision?: number; - /** *The fraction of the labels that were correct recognized. It is computed by dividing the * number of labels in the test documents that were correctly recognized by the total number of @@ -780,12 +772,32 @@ export interface ClassifierEvaluationMetrics { Accuracy?: number; /** - *
A measure of how accurate the classifier results are for the test data. It is a
- * combination of the Micro Precision
and Micro Recall
values. The
- * Micro F1Score
is the harmonic mean of the two scores. The highest score is 1,
- * and the worst score is 0.
A measure of the usefulness of the classifier results in the test data. High precision + * means that the classifier returned substantially more relevant results than irrelevant + * ones.
*/ - MicroF1Score?: number; + Precision?: number; + + /** + *A measure of how complete the classifier results are for the test data. High recall means + * that the classifier returned most of the relevant results.
+ */ + Recall?: number; + + /** + *A measure of how accurate the classifier results are for the test data. It is derived from
+ * the Precision
and Recall
values. The F1Score
is the
+ * harmonic average of the two scores. The highest score is 1, and the worst score is 0.
A measure of the usefulness of the recognizer results in the test data. High precision + * means that the recognizer returned substantially more relevant results than irrelevant ones. + * Unlike the Precision metric which comes from averaging the precision of all available labels, + * this is based on the overall score of all precision scores added together.
+ */ + MicroPrecision?: number; /** *A measure of how complete the classifier results are for the test data. High recall means @@ -798,11 +810,12 @@ export interface ClassifierEvaluationMetrics { MicroRecall?: number; /** - *
A measure of the usefulness of the classifier results in the test data. High precision - * means that the classifier returned substantially more relevant results than irrelevant - * ones.
+ *A measure of how accurate the classifier results are for the test data. It is a
+ * combination of the Micro Precision
and Micro Recall
values. The
+ * Micro F1Score
is the harmonic mean of the two scores. The highest score is 1,
+ * and the worst score is 0.
Indicates the fraction of labels that are incorrectly predicted. Also seen as the fraction @@ -810,20 +823,7 @@ export interface ClassifierEvaluationMetrics { * better.
*/ HammingLoss?: number; - - /** - *A measure of how accurate the classifier results are for the test data. It is derived from
- * the Precision
and Recall
values. The F1Score
is the
- * harmonic average of the two scores. The highest score is 1, and the worst score is 0.
A measure of how complete the classifier results are for the test data. High recall means - * that the classifier returned most of the relevant results.
- */ - Recall?: number; -} +} export namespace ClassifierEvaluationMetrics { export const filterSensitiveLog = (obj: ClassifierEvaluationMetrics): any => ({ @@ -846,17 +846,17 @@ export interface ClassifierMetadata { */ NumberOfTrainedDocuments?: number; - /** - *Describes the result metrics for the test data associated with an documentation - * classifier.
- */ - EvaluationMetrics?: ClassifierEvaluationMetrics; - /** *The number of documents in the input data that were used to test the classifier. Typically * this is 10 to 20 percent of the input documents, up to 10,000 documents.
*/ NumberOfTestDocuments?: number; + + /** + *Describes the result metrics for the test data associated with an documentation + * classifier.
+ */ + EvaluationMetrics?: ClassifierEvaluationMetrics; } export namespace ClassifierMetadata { @@ -889,14 +889,14 @@ export namespace ClassifyDocumentRequest { */ export interface DocumentClass { /** - *The confidence score that Amazon Comprehend has this class correctly attributed.
+ *The name of the class.
*/ - Score?: number; + Name?: string; /** - *The name of the class.
+ *The confidence score that Amazon Comprehend has this class correctly attributed.
*/ - Name?: string; + Score?: number; } export namespace DocumentClass { @@ -927,14 +927,6 @@ export namespace DocumentLabel { } export interface ClassifyDocumentResponse { - /** - *The labels used the document being analyzed. These are used for multi-label trained - * models. Individual labels represent different categories that are related in some manner and - * are not multually exclusive. For example, a movie can be just an action movie, or it can be an - * action movie, a science fiction movie, and a comedy, all at the same time.
- */ - Labels?: DocumentLabel[]; - /** *The classes used by the document being analyzed. These are used for multi-class trained * models. Individual classes are mutually exclusive and each document is expected to have only a @@ -942,6 +934,14 @@ export interface ClassifyDocumentResponse { * same time.
*/ Classes?: DocumentClass[]; + + /** + *The labels used the document being analyzed. These are used for multi-label trained + * models. Individual labels represent different categories that are related in some manner and + * are not mutually exclusive. For example, a movie can be just an action movie, or it can be an + * action movie, a science fiction movie, and a comedy, all at the same time.
+ */ + Labels?: DocumentLabel[]; } export namespace ClassifyDocumentResponse { @@ -976,16 +976,6 @@ export enum DocumentClassifierDataFormat { *For more information on how the input file is formatted, see how-document-classification-training-data.
*/ export interface DocumentClassifierInputDataConfig { - /** - *Indicates the delimiter used to separate each label for training a multi-label classifier. - * The default delimiter between labels is a pipe (|). You can use a different character as a - * delimiter (if it's an allowed character) by specifying it under Delimiter for labels. If the - * training documents use a delimiter other than the default or the delimiter you specify, the - * labels on that line will be combined to make a single unique label, such as - * LABELLABELLABEL.
- */ - LabelDelimiter?: string; - /** *The format of your training data:
*A list of augmented manifest files that provide training data for your custom model. An - * augmented manifest file is a labeled dataset that is produced by Amazon SageMaker Ground - * Truth.
- *This parameter is required if you set DataFormat
to
- * AUGMENTED_MANIFEST
.
The Amazon S3 URI for the input data. The S3 bucket must be in the same region as the API
* endpoint that you are calling. The URI can point to a single input file or it can provide the
@@ -1029,6 +1010,25 @@ export interface DocumentClassifierInputDataConfig {
* COMPREHEND_CSV
.
Indicates the delimiter used to separate each label for training a multi-label classifier. + * The default delimiter between labels is a pipe (|). You can use a different character as a + * delimiter (if it's an allowed character) by specifying it under Delimiter for labels. If the + * training documents use a delimiter other than the default or the delimiter you specify, the + * labels on that line will be combined to make a single unique label, such as + * LABELLABELLABEL.
+ */ + LabelDelimiter?: string; + + /** + *A list of augmented manifest files that provide training data for your custom model. An + * augmented manifest file is a labeled dataset that is produced by Amazon SageMaker Ground + * Truth.
+ *This parameter is required if you set DataFormat
to
+ * AUGMENTED_MANIFEST
.
Provides output results configuration parameters for custom classifier jobs.
*/ export interface DocumentClassifierOutputDataConfig { + /** + *When you use the OutputDataConfig
object while creating a custom
+ * classifier, you specify the Amazon S3 location where you want to write the confusion matrix.
+ * The URI must be in the same region as the API endpoint that you are calling. The location is
+ * used as the prefix for the actual location of this output file.
When the custom classifier job is finished, the service creates the output file in a
+ * directory specific to the job. The S3Uri
field contains the location of the
+ * output file, called output.tar.gz
. It is a compressed archive that contains the
+ * confusion matrix.
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the * output results from an analysis job. The KmsKeyId can be one of the following formats:
@@ -1071,18 +1083,6 @@ export interface DocumentClassifierOutputDataConfig { *When you use the OutputDataConfig
object while creating a custom
- * classifier, you specify the Amazon S3 location where you want to write the confusion matrix.
- * The URI must be in the same region as the API endpoint that you are calling. The location is
- * used as the prefix for the actual location of this output file.
When the custom classifier job is finished, the service creates the output file in a
- * directory specific to the job. The S3Uri
field contains the location of the
- * output file, called output.tar.gz
. It is a compressed archive that contains the
- * confusion matrix.
Configuration parameters for an optional private Virtual Private Cloud (VPC) containing - * the resources you are using for the job. For For more information, see Amazon + * the resources you are using for the job. For more information, see Amazon * VPC.
*/ export interface VpcConfig { @@ -1153,17 +1153,15 @@ export namespace VpcConfig { export interface CreateDocumentClassifierRequest { /** - *A unique identifier for the request. If you don't set the client request token, Amazon - * Comprehend generates one.
+ *The name of the document classifier.
*/ - ClientRequestToken?: string; + DocumentClassifierName: string | undefined; /** - *The language of the input documents. You can specify any of the following languages - * supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), - * Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.
+ *The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants + * Amazon Comprehend read access to your input data.
*/ - LanguageCode: LanguageCode | string | undefined; + DataAccessRoleArn: string | undefined; /** *Tags to be associated with the document classifier being created. A tag is a key-value @@ -1174,20 +1172,28 @@ export interface CreateDocumentClassifierRequest { Tags?: Tag[]; /** - *
Indicates the mode in which the classifier will be trained. The classifier can be trained - * in multi-class mode, which identifies one and only one class for each document, or multi-label - * mode, which identifies one or more labels for each document. In multi-label mode, multiple - * labels for an individual document are separated by a delimiter. The default delimiter between - * labels is a pipe (|).
+ *Specifies the format and location of the input data for the job.
*/ - Mode?: DocumentClassifierMode | string; + InputDataConfig: DocumentClassifierInputDataConfig | undefined; /** - *Configuration parameters for an optional private Virtual Private Cloud (VPC) containing - * the resources you are using for your custom classifier. For more information, see Amazon - * VPC.
+ *Enables the addition of output results configuration parameters for custom classifier + * jobs.
*/ - VpcConfig?: VpcConfig; + OutputDataConfig?: DocumentClassifierOutputDataConfig; + + /** + *A unique identifier for the request. If you don't set the client request token, Amazon + * Comprehend generates one.
+ */ + ClientRequestToken?: string; + + /** + *The language of the input documents. You can specify any of the following languages + * supported by Amazon Comprehend: German ("de"), English ("en"), Spanish ("es"), French ("fr"), + * Italian ("it"), or Portuguese ("pt"). All documents must be in the same language.
+ */ + LanguageCode: LanguageCode | string | undefined; /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt @@ -1208,26 +1214,20 @@ export interface CreateDocumentClassifierRequest { VolumeKmsKeyId?: string; /** - *
The name of the document classifier.
- */ - DocumentClassifierName: string | undefined; - - /** - *Specifies the format and location of the input data for the job.
- */ - InputDataConfig: DocumentClassifierInputDataConfig | undefined; - - /** - *Enables the addition of output results configuration parameters for custom classifier - * jobs.
+ *Configuration parameters for an optional private Virtual Private Cloud (VPC) containing + * the resources you are using for your custom classifier. For more information, see Amazon + * VPC.
*/ - OutputDataConfig?: DocumentClassifierOutputDataConfig; + VpcConfig?: VpcConfig; /** - *The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants - * Amazon Comprehend read access to your input data.
+ *Indicates the mode in which the classifier will be trained. The classifier can be trained + * in multi-class mode, which identifies one and only one class for each document, or multi-label + * mode, which identifies one or more labels for each document. In multi-label mode, multiple + * labels for an individual document are separated by a delimiter. The default delimiter between + * labels is a pipe (|).
*/ - DataAccessRoleArn: string | undefined; + Mode?: DocumentClassifierMode | string; } export namespace CreateDocumentClassifierRequest { @@ -1330,13 +1330,6 @@ export namespace TooManyTagsException { } export interface CreateEndpointRequest { - /** - *The desired number of inference units to be used by the model using this endpoint. - * - * Each inference unit represents of a throughput of 100 characters per second.
- */ - DesiredInferenceUnits: number | undefined; - /** *This is the descriptive suffix that becomes part of the EndpointArn
used for
* all subsequent requests to this resource.
Tags associated with the endpoint being created. A tag is a key-value pair that adds - * metadata to the endpoint. For example, a tag with "Sales" as the key might be added to an - * endpoint to indicate its use by the sales department.
+ *The desired number of inference units to be used by the model using this endpoint. + * + * Each inference unit represents of a throughput of 100 characters per second.
*/ - Tags?: Tag[]; + DesiredInferenceUnits: number | undefined; /** *An idempotency token provided by the customer. If this token matches a previous endpoint @@ -1362,6 +1355,13 @@ export interface CreateEndpointRequest { *
*/ ClientRequestToken?: string; + + /** + *Tags associated with the endpoint being created. A tag is a key-value pair that adds + * metadata to the endpoint. For example, a tag with "Sales" as the key might be added to an + * endpoint to indicate its use by the sales department.
+ */ + Tags?: Tag[]; } export namespace CreateEndpointRequest { @@ -1480,36 +1480,6 @@ export namespace EntityTypesListItem { *Specifies the format and location of the input data.
*/ export interface EntityRecognizerInputDataConfig { - /** - *The S3 location of the CSV file that annotates your training documents.
- */ - Annotations?: EntityRecognizerAnnotations; - - /** - *The entity types in the labeled training data that Amazon Comprehend uses to train the - * custom entity recognizer. Any entity types that you don't specify are ignored.
- *A maximum of 25 entity types can be used at one time to train an entity recognizer. Entity - * types must not contain the following invalid characters: \n (line break), \\n (escaped line - * break), \r (carriage return), \\r (escaped carriage return), \t (tab), \\t (escaped tab), - * space, and , (comma).
- */ - EntityTypes: EntityTypesListItem[] | undefined; - - /** - *A list of augmented manifest files that provide training data for your custom model. An - * augmented manifest file is a labeled dataset that is produced by Amazon SageMaker Ground - * Truth.
- *This parameter is required if you set DataFormat
to
- * AUGMENTED_MANIFEST
.
The S3 location of the CSV file that has the entity list for your custom entity - * recognizer.
- */ - EntityList?: EntityRecognizerEntityList; - /** *The format of your training data:
*The entity types in the labeled training data that Amazon Comprehend uses to train the + * custom entity recognizer. Any entity types that you don't specify are ignored.
+ *A maximum of 25 entity types can be used at one time to train an entity recognizer. Entity + * types must not contain the following invalid characters: \n (line break), \\n (escaped line + * break), \r (carriage return), \\r (escaped carriage return), \t (tab), \\t (escaped tab), + * space, and , (comma).
+ */ + EntityTypes: EntityTypesListItem[] | undefined; + /** *The S3 location of the folder that contains the training documents for your custom entity * recognizer.
@@ -1545,6 +1525,26 @@ export interface EntityRecognizerInputDataConfig { *COMPREHEND_CSV
.
*/
Documents?: EntityRecognizerDocuments;
+
+ /**
+ * The S3 location of the CSV file that annotates your training documents.
+ */ + Annotations?: EntityRecognizerAnnotations; + + /** + *The S3 location of the CSV file that has the entity list for your custom entity + * recognizer.
+ */ + EntityList?: EntityRecognizerEntityList; + + /** + *A list of augmented manifest files that provide training data for your custom model. An + * augmented manifest file is a labeled dataset that is produced by Amazon SageMaker Ground + * Truth.
+ *This parameter is required if you set DataFormat
to
+ * AUGMENTED_MANIFEST
.
Tags to be associated with the entity recognizer being created. A tag is a key-value pair - * that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with + *
The name given to the newly created recognizer. Recognizer names can be a maximum of 256 + * characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The name + * must be unique in the account/region.
+ */ + RecognizerName: string | undefined; + + /** + *The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants + * Amazon Comprehend read access to your input data.
+ */ + DataAccessRoleArn: string | undefined; + + /** + *Tags to be associated with the entity recognizer being created. A tag is a key-value pair + * that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with * "Sales" as the key might be added to a resource to indicate its use by the sales department. *
*/ Tags?: Tag[]; /** - *You can specify any of the following languages supported by Amazon Comprehend: English - * ("en"), Spanish ("es"), French ("fr"), Italian ("it"), German ("de"), or Portuguese ("pt"). - * All documents must be in the same language.
+ *Specifies the format and location of the input data. The S3 bucket containing the input + * data must be located in the same region as the entity recognizer being created.
*/ - LanguageCode: LanguageCode | string | undefined; + InputDataConfig: EntityRecognizerInputDataConfig | undefined; /** *A unique identifier for the request. If you don't set the client request token, Amazon @@ -1575,6 +1587,13 @@ export interface CreateEntityRecognizerRequest { */ ClientRequestToken?: string; + /** + *
You can specify any of the following languages supported by Amazon Comprehend: English + * ("en"), Spanish ("es"), French ("fr"), Italian ("it"), German ("de"), or Portuguese ("pt"). + * All documents must be in the same language.
+ */ + LanguageCode: LanguageCode | string | undefined; + /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt * data on the storage volume attached to the ML compute instance(s) that process the analysis @@ -1593,12 +1612,6 @@ export interface CreateEntityRecognizerRequest { */ VolumeKmsKeyId?: string; - /** - *
Specifies the format and location of the input data. The S3 bucket containing the input - * data must be located in the same region as the entity recognizer being created.
- */ - InputDataConfig: EntityRecognizerInputDataConfig | undefined; - /** *Configuration parameters for an optional private Virtual Private Cloud (VPC) containing * the resources you are using for your custom entity recognizer. For more information, see @@ -1606,19 +1619,6 @@ export interface CreateEntityRecognizerRequest { * VPC.
*/ VpcConfig?: VpcConfig; - - /** - *The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants - * Amazon Comprehend read access to your input data.
- */ - DataAccessRoleArn: string | undefined; - - /** - *The name given to the newly created recognizer. Recognizer names can be a maximum of 256 - * characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The name - * must be unique in the account/region.
- */ - RecognizerName: string | undefined; } export namespace CreateEntityRecognizerRequest { @@ -1776,6 +1776,18 @@ export enum JobStatus { * */ export interface OutputDataConfig { + /** + *When you use the OutputDataConfig
object with asynchronous operations, you
+ * specify the Amazon S3 location where you want to write the output data. The URI must be in the
+ * same region as the API endpoint that you are calling. The location is used as the prefix for
+ * the actual location of the output file.
When the topic detection job is finished, the service creates an output file in a
+ * directory specific to the job. The S3Uri
field contains the location of the
+ * output file, called output.tar.gz
. It is a compressed archive that contains the
+ * ouput of the operation.
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the * output results from an analysis job. The KmsKeyId can be one of the following formats:
@@ -1801,18 +1813,6 @@ export interface OutputDataConfig { *When you use the OutputDataConfig
object with asynchronous operations, you
- * specify the Amazon S3 location where you want to write the output data. The URI must be in the
- * same region as the API endpoint that you are calling. The location is used as the prefix for
- * the actual location of the output file.
When the topic detection job is finished, the service creates an output file in a
- * directory specific to the job. The S3Uri
field contains the location of the
- * output file, called output.tar.gz
. It is a compressed archive that contains the
- * ouput of the operation.
The Amazon Resource Name (ARN) of the AWS identity and Access Management (IAM) role that - * grants Amazon Comprehend read access to your input data.
+ *The identifier assigned to the document classification job.
*/ - DataAccessRoleArn?: string; + JobId?: string; + + /** + *The name that you assigned to the document classification job.
+ */ + JobName?: string; /** *The current status of the document classification job. If the status is @@ -1838,11 +1842,24 @@ export interface DocumentClassificationJobProperties { JobStatus?: JobStatus | string; /** - *
Configuration parameters for a private Virtual Private Cloud (VPC) containing the - * resources you are using for your document classification job. For more information, see Amazon - * VPC.
+ *A description of the status of the job.
*/ - VpcConfig?: VpcConfig; + Message?: string; + + /** + *The time that the document classification job was submitted for processing.
+ */ + SubmitTime?: Date; + + /** + *The time that the document classification job completed.
+ */ + EndTime?: Date; + + /** + *The Amazon Resource Name (ARN) that identifies the document classifier.
+ */ + DocumentClassifierArn?: string; /** *The input data configuration that you supplied when you created the document @@ -1851,9 +1868,16 @@ export interface DocumentClassificationJobProperties { InputDataConfig?: InputDataConfig; /** - *
The identifier assigned to the document classification job.
+ *The output data configuration that you supplied when you created the document + * classification job.
*/ - JobId?: string; + OutputDataConfig?: OutputDataConfig; + + /** + *The Amazon Resource Name (ARN) of the AWS identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data.
+ */ + DataAccessRoleArn?: string; /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt @@ -1874,35 +1898,11 @@ export interface DocumentClassificationJobProperties { VolumeKmsKeyId?: string; /** - *
The time that the document classification job was submitted for processing.
- */ - SubmitTime?: Date; - - /** - *The Amazon Resource Name (ARN) that identifies the document classifier.
- */ - DocumentClassifierArn?: string; - - /** - *A description of the status of the job.
- */ - Message?: string; - - /** - *The name that you assigned to the document classification job.
- */ - JobName?: string; - - /** - *The time that the document classification job completed.
- */ - EndTime?: Date; - - /** - *The output data configuration that you supplied when you created the document - * classification job.
+ *Configuration parameters for a private Virtual Private Cloud (VPC) containing the + * resources you are using for your document classification job. For more information, see Amazon + * VPC.
*/ - OutputDataConfig?: OutputDataConfig; + VpcConfig?: VpcConfig; } export namespace DocumentClassificationJobProperties { @@ -1969,39 +1969,15 @@ export enum ModelStatus { */ export interface DocumentClassifierProperties { /** - *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt - * data on the storage volume attached to the ML compute instance(s) that process the analysis - * job. The VolumeKmsKeyId can be either of the following formats:
- *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
- *
Amazon Resource Name (ARN) of a KMS Key:
- * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
- *
Provides output results configuration parameters for custom classifier jobs.
- */ - OutputDataConfig?: DocumentClassifierOutputDataConfig; - - /** - *Indicates the time when the training starts on documentation classifiers. You are billed - * for the time interval between this time and the value of TrainingEndTime.
+ *The Amazon Resource Name (ARN) that identifies the document classifier.
*/ - TrainingStartTime?: Date; + DocumentClassifierArn?: string; /** - *The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants - * Amazon Comprehend read access to your input data.
+ *The language code for the language of the documents that the classifier was trained + * on.
*/ - DataAccessRoleArn?: string; + LanguageCode?: LanguageCode | string; /** *The status of the document classifier. If the status is TRAINED
the
@@ -2011,16 +1987,9 @@ export interface DocumentClassifierProperties {
Status?: ModelStatus | string;
/**
- *
Information about the document classifier, including the number of documents used for - * training the classifier, the number of documents used for test the classifier, and an accuracy - * rating.
- */ - ClassifierMetadata?: ClassifierMetadata; - - /** - *The Amazon Resource Name (ARN) that identifies the document classifier.
+ *Additional information about the status of the classifier.
*/ - DocumentClassifierArn?: string; + Message?: string; /** *The time that the document classifier was submitted for training.
@@ -2028,16 +1997,15 @@ export interface DocumentClassifierProperties { SubmitTime?: Date; /** - *Indicates the mode in which the specific classifier was trained. This also indicates the - * format of input documents and the format of the confusion matrix. Each classifier can only be - * trained in one mode and this cannot be changed once the classifier is trained.
+ *The time that training the document classifier completed.
*/ - Mode?: DocumentClassifierMode | string; + EndTime?: Date; /** - *Additional information about the status of the classifier.
+ *Indicates the time when the training starts on documentation classifiers. You are billed + * for the time interval between this time and the value of TrainingEndTime.
*/ - Message?: string; + TrainingStartTime?: Date; /** *The time that training of the document classifier was completed. Indicates the time when @@ -2046,17 +2014,48 @@ export interface DocumentClassifierProperties { */ TrainingEndTime?: Date; - /** - *
The time that training the document classifier completed.
- */ - EndTime?: Date; - /** *The input data configuration that you supplied when you created the document classifier * for training.
*/ InputDataConfig?: DocumentClassifierInputDataConfig; + /** + *Provides output results configuration parameters for custom classifier jobs.
+ */ + OutputDataConfig?: DocumentClassifierOutputDataConfig; + + /** + *Information about the document classifier, including the number of documents used for + * training the classifier, the number of documents used for test the classifier, and an accuracy + * rating.
+ */ + ClassifierMetadata?: ClassifierMetadata; + + /** + *The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants + * Amazon Comprehend read access to your input data.
+ */ + DataAccessRoleArn?: string; + + /** + *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt + * data on the storage volume attached to the ML compute instance(s) that process the analysis + * job. The VolumeKmsKeyId can be either of the following formats:
+ *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Amazon Resource Name (ARN) of a KMS Key:
+ * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Configuration parameters for a private Virtual Private Cloud (VPC) containing the
* resources you are using for your custom classifier. For more information, see Amazon
@@ -2065,10 +2064,11 @@ export interface DocumentClassifierProperties {
VpcConfig?: VpcConfig;
/**
- * The language code for the language of the documents that the classifier was trained
- * on. Indicates the mode in which the specific classifier was trained. This also indicates the
+ * format of input documents and the format of the confusion matrix. Each classifier can only be
+ * trained in one mode and this cannot be changed once the classifier is trained. The input data configuration that you supplied when you created the dominant language
- * detection job. The identifier assigned to the dominant language detection job. The time that the dominant language detection job was submitted for processing. The name that you assigned to the dominant language detection job. The time that the dominant language detection job completed. The current status of the dominant language detection job. If the status is
+ * The name that you assigned to the dominant language detection job. A description for the status of a job. Configuration parameters for a private Virtual Private Cloud (VPC) containing the
- * resources you are using for your dominant language detection job. For more information, see
- * Amazon
- * VPC. The time that the dominant language detection job was submitted for processing. The identifier assigned to the dominant language detection job. The time that the dominant language detection job completed. A description for the status of a job. The input data configuration that you supplied when you created the dominant language
+ * detection job. The output data configuration that you supplied when you created the dominant language
@@ -2157,12 +2155,6 @@ export interface DominantLanguageDetectionJobProperties {
*/
OutputDataConfig?: OutputDataConfig;
- /**
- * The current status of the dominant language detection job. If the status is
- * The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input
* data. Configuration parameters for a private Virtual Private Cloud (VPC) containing the
+ * resources you are using for your dominant language detection job. For more information, see
+ * Amazon
+ * VPC. The number of inference units currently used by the model using this endpoint. Specifies a reason for failure in cases of The Amazon Resource Number (ARN) of the endpoint. Specifies the status of the endpoint. Because the endpoint updates and creation are
@@ -2250,6 +2245,11 @@ export interface EndpointProperties {
*/
Status?: EndpointStatus | string;
+ /**
+ * Specifies a reason for failure in cases of The Amazon Resource Number (ARN) of the model to which the endpoint is attached. The date and time that the endpoint was last modified. The number of inference units currently used by the model using this endpoint. The creation date and time of the endpoint. The Amazon Resource Number (ARN) of the endpoint. The date and time that the endpoint was last modified. Configuration parameters for a private Virtual Private Cloud (VPC) containing the
- * resources you are using for your entity detection job. For more information, see Amazon
- * VPC. The identifier assigned to the entities detection job. The language code of the input documents. The name that you assigned the entities detection job. The identifier assigned to the entities detection job. The current status of the entities detection job. If the status is The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input
- * data. A description of the status of a job. The Amazon Resource Name (ARN) that identifies the entity recognizer. The time that the entities detection job was submitted for processing. ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt
- * data on the storage volume attached to the ML compute instance(s) that process the analysis
- * job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: Amazon Resource Name (ARN) of a KMS Key:
- * The time that the entities detection job completed The output data configuration that you supplied when you created the entities detection
- * job. The Amazon Resource Name (ARN) that identifies the entity recognizer. A description of the status of a job. The input data configuration that you supplied when you created the entities detection
+ * job. The name that you assigned the entities detection job. The output data configuration that you supplied when you created the entities detection
+ * job. The current status of the entities detection job. If the status is The language code of the input documents. The time that the entities detection job was submitted for processing. The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input
+ * data. The time that the entities detection job completed ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt
+ * data on the storage volume attached to the ML compute instance(s) that process the analysis
+ * job. The VolumeKmsKeyId can be either of the following formats: KMS Key ID: Amazon Resource Name (ARN) of a KMS Key:
+ * The input data configuration that you supplied when you created the entities detection
- * job. Configuration parameters for a private Virtual Private Cloud (VPC) containing the
+ * resources you are using for your entity detection job. For more information, see Amazon
+ * VPC. FAILED
, the Message
field shows the reason for the failure.FAILED
, the Message
field shows the reason for the failure.Failed
status.Failed
status.FAILED
,
+ * the Message
field shows the reason for the failure.
- *
+ * "1234abcd-12ab-34cd-56ef-1234567890ab"
- * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
- * FAILED
,
- * the Message
field shows the reason for the failure.
+ *
*/
- EndTime?: Date;
+ VolumeKmsKeyId?: string;
/**
- * "1234abcd-12ab-34cd-56ef-1234567890ab"
+ * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
A measure of the usefulness of the recognizer results for a specific entity type in the + * test data. High precision means that the recognizer returned substantially more relevant + * results than irrelevant ones.
+ */ + Precision?: number; + /** *A measure of how complete the recognizer results are for a specific entity type in the * test data. High recall means that the recognizer returned most of the relevant results.
@@ -2444,19 +2451,12 @@ export interface EntityTypesEvaluationMetrics { Recall?: number; /** - *A measure of how accurate the recognizer results are for for a specific entity type in the + *
A measure of how accurate the recognizer results are for a specific entity type in the
* test data. It is derived from the Precision
and Recall
values. The
* F1Score
is the harmonic average of the two scores. The highest score is 1, and
* the worst score is 0.
A measure of the usefulness of the recognizer results for a specific entity type in the - * test data. High precision means that the recognizer returned substantially more relevant - * results than irrelevant ones.
- */ - Precision?: number; } export namespace EntityTypesEvaluationMetrics { @@ -2471,9 +2471,10 @@ export namespace EntityTypesEvaluationMetrics { */ export interface EntityRecognizerMetadataEntityTypesListItem { /** - *Indicates the number of times the given entity type was seen in the training data.
+ *Type of entity from the list of entity types in the metadata of an entity recognizer. + *
*/ - NumberOfTrainMentions?: number; + Type?: string; /** *Detailed information about the accuracy of the entity recognizer for a specific item on @@ -2482,10 +2483,9 @@ export interface EntityRecognizerMetadataEntityTypesListItem { EvaluationMetrics?: EntityTypesEvaluationMetrics; /** - *
Type of entity from the list of entity types in the metadata of an entity recognizer. - *
+ *Indicates the number of times the given entity type was seen in the training data.
*/ - Type?: string; + NumberOfTrainMentions?: number; } export namespace EntityRecognizerMetadataEntityTypesListItem { @@ -2498,6 +2498,13 @@ export namespace EntityRecognizerMetadataEntityTypesListItem { *Detailed information about the accuracy of an entity recognizer.
*/ export interface EntityRecognizerEvaluationMetrics { + /** + *A measure of the usefulness of the recognizer results in the test data. High precision + * means that the recognizer returned substantially more relevant results than irrelevant ones. + *
+ */ + Precision?: number; + /** *A measure of how complete the recognizer results are for the test data. High recall means * that the recognizer returned most of the relevant results.
@@ -2510,13 +2517,6 @@ export interface EntityRecognizerEvaluationMetrics { * harmonic average of the two scores. The highest score is 1, and the worst score is 0. */ F1Score?: number; - - /** - *A measure of the usefulness of the recognizer results in the test data. High precision - * means that the recognizer returned substantially more relevant results than irrelevant ones. - *
- */ - Precision?: number; } export namespace EntityRecognizerEvaluationMetrics { @@ -2530,14 +2530,10 @@ export namespace EntityRecognizerEvaluationMetrics { */ export interface EntityRecognizerMetadata { /** - *Entity types from the metadata of an entity recognizer.
- */ - EntityTypes?: EntityRecognizerMetadataEntityTypesListItem[]; - - /** - *Detailed information about the accuracy of an entity recognizer.
+ *The number of documents in the input data that were used to train the entity recognizer. + * Typically this is 80 to 90 percent of the input documents.
*/ - EvaluationMetrics?: EntityRecognizerEvaluationMetrics; + NumberOfTrainedDocuments?: number; /** *The number of documents in the input data that were used to test the entity recognizer. @@ -2546,10 +2542,14 @@ export interface EntityRecognizerMetadata { NumberOfTestDocuments?: number; /** - *
The number of documents in the input data that were used to train the entity recognizer. - * Typically this is 80 to 90 percent of the input documents.
+ *Detailed information about the accuracy of an entity recognizer.
*/ - NumberOfTrainedDocuments?: number; + EvaluationMetrics?: EntityRecognizerEvaluationMetrics; + + /** + *Entity types from the metadata of an entity recognizer.
+ */ + EntityTypes?: EntityRecognizerMetadataEntityTypesListItem[]; } export namespace EntityRecognizerMetadata { @@ -2563,49 +2563,41 @@ export namespace EntityRecognizerMetadata { */ export interface EntityRecognizerProperties { /** - *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt - * data on the storage volume attached to the ML compute instance(s) that process the analysis - * job. The VolumeKmsKeyId can be either of the following formats:
- *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
- *
Amazon Resource Name (ARN) of a KMS Key:
- * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
- *
The Amazon Resource Name (ARN) that identifies the entity recognizer.
*/ - VolumeKmsKeyId?: string; + EntityRecognizerArn?: string; /** - *The Amazon Resource Name (ARN) that identifies the entity recognizer.
+ *The language of the input documents. All documents must be in the same language. Only + * English ("en") is currently supported.
*/ - EntityRecognizerArn?: string; + LanguageCode?: LanguageCode | string; /** - *A description of the status of the recognizer.
+ *Provides the status of the entity recognizer.
*/ - Message?: string; + Status?: ModelStatus | string; /** - *Provides information about an entity recognizer.
+ *A description of the status of the recognizer.
*/ - RecognizerMetadata?: EntityRecognizerMetadata; + Message?: string; /** - *The language of the input documents. All documents must be in the same language. Only - * English ("en") is currently supported.
+ *The time that the recognizer was submitted for processing.
*/ - LanguageCode?: LanguageCode | string; + SubmitTime?: Date; /** *The time that the recognizer creation completed.
*/ EndTime?: Date; + /** + *The time that training of the entity recognizer started.
+ */ + TrainingStartTime?: Date; + /** *The time that training of the entity recognizer was completed.
*/ @@ -2617,16 +2609,9 @@ export interface EntityRecognizerProperties { InputDataConfig?: EntityRecognizerInputDataConfig; /** - *Configuration parameters for a private Virtual Private Cloud (VPC) containing the - * resources you are using for your custom entity recognizer. For more information, see Amazon - * VPC.
+ *Provides information about an entity recognizer.
*/ - VpcConfig?: VpcConfig; - - /** - *The time that the recognizer was submitted for processing.
- */ - SubmitTime?: Date; + RecognizerMetadata?: EntityRecognizerMetadata; /** *The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants @@ -2635,14 +2620,29 @@ export interface EntityRecognizerProperties { DataAccessRoleArn?: string; /** - *
The time that training of the entity recognizer started.
+ *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt + * data on the storage volume attached to the ML compute instance(s) that process the analysis + * job. The VolumeKmsKeyId can be either of the following formats:
+ *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Amazon Resource Name (ARN) of a KMS Key:
+ * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Provides the status of the entity recognizer.
+ *Configuration parameters for a private Virtual Private Cloud (VPC) containing the + * resources you are using for your custom entity recognizer. For more information, see Amazon + * VPC.
*/ - Status?: ModelStatus | string; + VpcConfig?: VpcConfig; } export namespace EntityRecognizerProperties { @@ -2668,6 +2668,99 @@ export namespace DescribeEntityRecognizerResponse { }); } +export interface DescribeEventsDetectionJobRequest { + /** + *The identifier of the events detection job.
+ */ + JobId: string | undefined; +} + +export namespace DescribeEventsDetectionJobRequest { + export const filterSensitiveLog = (obj: DescribeEventsDetectionJobRequest): any => ({ + ...obj, + }); +} + +/** + *Provides information about an events detection job.
+ */ +export interface EventsDetectionJobProperties { + /** + *The identifier assigned to the events detection job.
+ */ + JobId?: string; + + /** + *The name you assigned the events detection job.
+ */ + JobName?: string; + + /** + *The current status of the events detection job.
+ */ + JobStatus?: JobStatus | string; + + /** + *A description of the status of the events detection job.
+ */ + Message?: string; + + /** + *The time that the events detection job was submitted for processing.
+ */ + SubmitTime?: Date; + + /** + *The time that the events detection job completed.
+ */ + EndTime?: Date; + + /** + *The input data configuration that you supplied when you created the events detection job.
+ */ + InputDataConfig?: InputDataConfig; + + /** + *The output data configuration that you supplied when you created the events detection job.
+ */ + OutputDataConfig?: OutputDataConfig; + + /** + *The language code of the input documents.
+ */ + LanguageCode?: LanguageCode | string; + + /** + *<p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data.</p>
+ */ + DataAccessRoleArn?: string; + + /** + *The types of events that are detected by the job.
+ */ + TargetEventTypes?: string[]; +} + +export namespace EventsDetectionJobProperties { + export const filterSensitiveLog = (obj: EventsDetectionJobProperties): any => ({ + ...obj, + }); +} + +export interface DescribeEventsDetectionJobResponse { + /** + *An object that contains the properties associated with an event detection job.
+ */ + EventsDetectionJobProperties?: EventsDetectionJobProperties; +} + +export namespace DescribeEventsDetectionJobResponse { + export const filterSensitiveLog = (obj: DescribeEventsDetectionJobResponse): any => ({ + ...obj, + }); +} + export interface DescribeKeyPhrasesDetectionJobRequest { /** *The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its @@ -2687,32 +2780,14 @@ export namespace DescribeKeyPhrasesDetectionJobRequest { */ export interface KeyPhrasesDetectionJobProperties { /** - *
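A sketch of consuming the new describe shape from application code, assuming the usual generated DescribeEventsDetectionJobCommand wrapper (the command class itself is outside this hunk):

import { ComprehendClient, DescribeEventsDetectionJobCommand } from "@aws-sdk/client-comprehend";

const client = new ComprehendClient({ region: "us-east-1" });

async function waitForEventsJob(jobId: string): Promise<void> {
  // Poll until the job reaches a terminal state.
  for (;;) {
    const { EventsDetectionJobProperties: job } = await client.send(
      new DescribeEventsDetectionJobCommand({ JobId: jobId })
    );
    if (job?.JobStatus === "COMPLETED" || job?.JobStatus === "FAILED" || job?.JobStatus === "STOPPED") {
      console.log(`${job.JobName ?? jobId}: ${job.JobStatus} ${job.Message ?? ""}`);
      return;
    }
    await new Promise((resolve) => setTimeout(resolve, 30_000)); // wait 30 seconds between polls
  }
}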
The time that the key phrases detection job completed.
- */ - EndTime?: Date; - - /** - *The time that the key phrases detection job was submitted for processing.
+ *The identifier assigned to the key phrases detection job.
*/ - SubmitTime?: Date; + JobId?: string; /** - *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt - * data on the storage volume attached to the ML compute instance(s) that process the analysis - * job. The VolumeKmsKeyId can be either of the following formats:
- *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
- *
Amazon Resource Name (ARN) of a KMS Key:
- * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
- *
The name that you assigned the key phrases detection job.
*/ - VolumeKmsKeyId?: string; + JobName?: string; /** *The current status of the key phrases detection job. If the status is FAILED
,
@@ -2721,26 +2796,25 @@ export interface KeyPhrasesDetectionJobProperties {
JobStatus?: JobStatus | string;
/**
- *
The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input - * data.
+ *A description of the status of a job.
*/ - DataAccessRoleArn?: string; + Message?: string; /** - *The input data configuration that you supplied when you created the key phrases detection - * job.
+ *The time that the key phrases detection job was submitted for processing.
*/ - InputDataConfig?: InputDataConfig; + SubmitTime?: Date; /** - *A description of the status of a job.
+ *The time that the key phrases detection job completed.
*/ - Message?: string; + EndTime?: Date; /** - *The identifier assigned to the key phrases detection job.
+ *The input data configuration that you supplied when you created the key phrases detection + * job.
*/ - JobId?: string; + InputDataConfig?: InputDataConfig; /** *The output data configuration that you supplied when you created the key phrases detection @@ -2749,9 +2823,33 @@ export interface KeyPhrasesDetectionJobProperties { OutputDataConfig?: OutputDataConfig; /** - *
The name that you assigned the key phrases detection job.
+ *The language code of the input documents.
*/ - JobName?: string; + LanguageCode?: LanguageCode | string; + + /** + *The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input + * data.
+ */ + DataAccessRoleArn?: string; + + /** + *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt + * data on the storage volume attached to the ML compute instance(s) that process the analysis + * job. The VolumeKmsKeyId can be either of the following formats:
+ *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Amazon Resource Name (ARN) of a KMS Key:
+ * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Configuration parameters for a private Virtual Private Cloud (VPC) containing the @@ -2759,11 +2857,6 @@ export interface KeyPhrasesDetectionJobProperties { * VPC.
*/ VpcConfig?: VpcConfig; - - /** - *The language code of the input documents.
- */ - LanguageCode?: LanguageCode | string; } export namespace KeyPhrasesDetectionJobProperties { @@ -2809,17 +2902,17 @@ export enum PiiEntitiesDetectionMode { *Provides configuration parameters for the output of PII entity detection jobs.
*/ export interface PiiOutputDataConfig { - /** - *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the - * output results from an analysis job.
- */ - KmsKeyId?: string; - /** *When you use the PiiOutputDataConfig
object with asynchronous operations,
* you specify the Amazon S3 location where you want to write the output data.
ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt the + * output results from an analysis job.
+ */ + KmsKeyId?: string; } export namespace PiiOutputDataConfig { @@ -2863,18 +2956,18 @@ export enum PiiEntityType { *Provides configuration parameters for PII entity redaction.
*/ export interface RedactionConfig { - /** - *Specifies whether the PII entity is redacted with the mask character or the entity - * type.
- */ - MaskMode?: PiiEntitiesDetectionMaskMode | string; - /** *An array of the types of PII entities that Amazon Comprehend detects in the input text for * your request.
*/ PiiEntityTypes?: (PiiEntityType | string)[]; + /** + *Specifies whether the PII entity is redacted with the mask character or the entity + * type.
+ */ + MaskMode?: PiiEntitiesDetectionMaskMode | string; + /** *A character that replaces each character in the redacted PII entity.
*/ @@ -2892,52 +2985,46 @@ export namespace RedactionConfig { */ export interface PiiEntitiesDetectionJobProperties { /** - *The time that the PII entities detection job completed.
- */ - EndTime?: Date; - - /** - *The output data configuration that you supplied when you created the PII entities - * detection job.
+ *The identifier assigned to the PII entities detection job.
*/ - OutputDataConfig?: PiiOutputDataConfig; + JobId?: string; /** - *A description of the status of a job.
+ *The name that you assigned the PII entities detection job.
*/ - Message?: string; + JobName?: string; /** - *The name that you assigned the PII entities detection job.
+ *The current status of the PII entities detection job. If the status is
+ * FAILED
, the Message
field shows the reason for the failure.
Specifies whether the output provides the locations (offsets) of PII entities or a file in - * which PII entities are redacted.
+ *A description of the status of a job.
*/ - Mode?: PiiEntitiesDetectionMode | string; + Message?: string; /** - *The language code of the input documents
+ *The time that the PII entities detection job was submitted for processing.
*/ - LanguageCode?: LanguageCode | string; + SubmitTime?: Date; /** - *The identifier assigned to the PII entities detection job.
+ *The time that the PII entities detection job completed.
*/ - JobId?: string; + EndTime?: Date; /** - *The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input - * data.
+ *The input properties for a PII entities detection job.
*/ - DataAccessRoleArn?: string; + InputDataConfig?: InputDataConfig; /** - *The time that the PII entities detection job was submitted for processing.
+ *The output data configuration that you supplied when you created the PII entities + * detection job.
*/ - SubmitTime?: Date; + OutputDataConfig?: PiiOutputDataConfig; /** *Provides configuration parameters for PII entity redaction.
@@ -2948,15 +3035,21 @@ export interface PiiEntitiesDetectionJobProperties { RedactionConfig?: RedactionConfig; /** - *The input properties for a PII entities detection job.
+ *The language code of the input documents
*/ - InputDataConfig?: InputDataConfig; + LanguageCode?: LanguageCode | string; /** - *The current status of the PII entities detection job. If the status is
- * FAILED
, the Message
field shows the reason for the failure.
The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input + * data.
*/ - JobStatus?: JobStatus | string; + DataAccessRoleArn?: string; + + /** + *Specifies whether the output provides the locations (offsets) of PII entities or a file in + * which PII entities are redacted.
+ */ + Mode?: PiiEntitiesDetectionMode | string; } export namespace PiiEntitiesDetectionJobProperties { @@ -2997,27 +3090,30 @@ export namespace DescribeSentimentDetectionJobRequest { */ export interface SentimentDetectionJobProperties { /** - *The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input - * data.
+ *The identifier assigned to the sentiment detection job.
*/ - DataAccessRoleArn?: string; + JobId?: string; /** - *The time that the sentiment detection job was submitted for processing.
+ *The name that you assigned to the sentiment detection job
*/ - SubmitTime?: Date; + JobName?: string; /** - *Configuration parameters for a private Virtual Private Cloud (VPC) containing the - * resources you are using for your sentiment detection job. For more information, see Amazon - * VPC.
+ *The current status of the sentiment detection job. If the status is FAILED
,
+ * the Messages
field shows the reason for the failure.
The language code of the input documents.
+ *A description of the status of a job.
*/ - LanguageCode?: LanguageCode | string; + Message?: string; + + /** + *The time that the sentiment detection job was submitted for processing.
+ */ + SubmitTime?: Date; /** *The time that the sentiment detection job ended.
@@ -3025,26 +3121,27 @@ export interface SentimentDetectionJobProperties { EndTime?: Date; /** - *The output data configuration that you supplied when you created the sentiment detection + *
The input data configuration that you supplied when you created the sentiment detection * job.
*/ - OutputDataConfig?: OutputDataConfig; + InputDataConfig?: InputDataConfig; /** - *The name that you assigned to the sentiment detection job
+ *The output data configuration that you supplied when you created the sentiment detection + * job.
*/ - JobName?: string; + OutputDataConfig?: OutputDataConfig; /** - *A description of the status of a job.
+ *The language code of the input documents.
*/ - Message?: string; + LanguageCode?: LanguageCode | string; /** - *The input data configuration that you supplied when you created the sentiment detection - * job.
+ *The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input + * data.
*/ - InputDataConfig?: InputDataConfig; + DataAccessRoleArn?: string; /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt @@ -3065,15 +3162,11 @@ export interface SentimentDetectionJobProperties { VolumeKmsKeyId?: string; /** - *
The identifier assigned to the sentiment detection job.
- */ - JobId?: string; - - /** - *The current status of the sentiment detection job. If the status is FAILED
,
- * the Messages
field shows the reason for the failure.
Configuration parameters for a private Virtual Private Cloud (VPC) containing the + * resources you are using for your sentiment detection job. For more information, see Amazon + * VPC.
*/ - JobStatus?: JobStatus | string; + VpcConfig?: VpcConfig; } export namespace SentimentDetectionJobProperties { @@ -3112,6 +3205,16 @@ export namespace DescribeTopicsDetectionJobRequest { *Provides information about a topic detection job.
*/ export interface TopicsDetectionJobProperties { + /** + *The identifier assigned to the topic detection job.
+ */ + JobId?: string; + + /** + *The name of the topic detection job.
+ */ + JobName?: string; + /** *The current status of the topic detection job. If the status is Failed
,
* the reason for the failure is shown in the Message
field.
The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants - * Amazon Comprehend read access to your job data.
+ *A description for the status of a job.
*/ - DataAccessRoleArn?: string; + Message?: string; /** - *Configuration parameters for a private Virtual Private Cloud (VPC) containing the - * resources you are using for your topic detection job. For more information, see Amazon - * VPC.
+ *The time that the topic detection job was submitted for processing.
*/ - VpcConfig?: VpcConfig; + SubmitTime?: Date; + + /** + *The time that the topic detection job was completed.
+ */ + EndTime?: Date; /** *The input data configuration supplied when you created the topic detection @@ -3138,9 +3243,10 @@ export interface TopicsDetectionJobProperties { InputDataConfig?: InputDataConfig; /** - *
The identifier assigned to the topic detection job.
+ *The output data configuration supplied when you created the topic detection + * job.
*/ - JobId?: string; + OutputDataConfig?: OutputDataConfig; /** *The number of topics to detect supplied when you created the topic detection job. The @@ -3148,6 +3254,12 @@ export interface TopicsDetectionJobProperties { */ NumberOfTopics?: number; + /** + *
The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that grants + * Amazon Comprehend read access to your job data.
+ */ + DataAccessRoleArn?: string; + /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt * data on the storage volume attached to the ML compute instance(s) that process the analysis @@ -3167,30 +3279,11 @@ export interface TopicsDetectionJobProperties { VolumeKmsKeyId?: string; /** - *
The time that the topic detection job was submitted for processing.
- */ - SubmitTime?: Date; - - /** - *A description for the status of a job.
- */ - Message?: string; - - /** - *The name of the topic detection job.
- */ - JobName?: string; - - /** - *The time that the topic detection job was completed.
- */ - EndTime?: Date; - - /** - *The output data configuration supplied when you created the topic detection - * job.
+ *Configuration parameters for a private Virtual Private Cloud (VPC) containing the + * resources you are using for your topic detection job. For more information, see Amazon + * VPC.
*/ - OutputDataConfig?: OutputDataConfig; + VpcConfig?: VpcConfig; } export namespace TopicsDetectionJobProperties { @@ -3250,15 +3343,6 @@ export interface DetectEntitiesRequest { */ Text: string | undefined; - /** - *The Amazon Resource Name of an endpoint that is associated with a custom entity - * recognition model. Provide an endpoint if you want to detect entities by using your own custom - * model instead of the default model that is used by Amazon Comprehend.
- *If you specify an endpoint, Amazon Comprehend uses the language of your custom model, and - * it ignores any language code that you provide in your request.
- */ - EndpointArn?: string; - /** *The language of the input documents. You can specify any of the primary languages * supported by Amazon Comprehend. All documents must be in the same language.
@@ -3267,6 +3351,15 @@ export interface DetectEntitiesRequest { * specify here. */ LanguageCode?: LanguageCode | string; + + /** + *The Amazon Resource Name of an endpoint that is associated with a custom entity + * recognition model. Provide an endpoint if you want to detect entities by using your own custom + * model instead of the default model that is used by Amazon Comprehend.
+ *If you specify an endpoint, Amazon Comprehend uses the language of your custom model, and + * it ignores any language code that you provide in your request.
+ */ + EndpointArn?: string; } export namespace DetectEntitiesRequest { @@ -3295,17 +3388,17 @@ export namespace DetectEntitiesResponse { } export interface DetectKeyPhrasesRequest { - /** - *The language of the input documents. You can specify any of the primary languages - * supported by Amazon Comprehend. All documents must be in the same language.
- */ - LanguageCode: LanguageCode | string | undefined; - /** *A UTF-8 text string. Each string must contain fewer that 5,000 bytes of UTF-8 encoded * characters.
*/ Text: string | undefined; + + /** + *The language of the input documents. You can specify any of the primary languages + * supported by Amazon Comprehend. All documents must be in the same language.
+ */ + LanguageCode: LanguageCode | string | undefined; } export namespace DetectKeyPhrasesRequest { @@ -3354,11 +3447,6 @@ export namespace DetectPiiEntitiesRequest { *Provides information about a PII entity.
*/ export interface PiiEntity { - /** - *The entity's type.
- */ - Type?: PiiEntityType | string; - /** *The level of confidence that Amazon Comprehend has in the accuracy of the * detection.
@@ -3366,12 +3454,9 @@ export interface PiiEntity { Score?: number; /** - *A character offset in the input text that shows where the PII entity ends. The offset - * returns the position of each UTF-8 code point in the string. A code point - * is the abstract character from a particular graphical representation. For example, a - * multi-byte UTF-8 character maps to a single code point.
+ *The entity's type.
*/ - EndOffset?: number; + Type?: PiiEntityType | string; /** *A character offset in the input text that shows where the PII entity begins (the first @@ -3381,6 +3466,14 @@ export interface PiiEntity { * point.
*/ BeginOffset?: number; + + /** + *A character offset in the input text that shows where the PII entity ends. The offset + * returns the position of each UTF-8 code point in the string. A code point + * is the abstract character from a particular graphical representation. For example, a + * multi-byte UTF-8 character maps to a single code point.
+ */ + EndOffset?: number; } export namespace PiiEntity { @@ -3509,13 +3602,6 @@ export interface DocumentClassificationJobFilter { */ JobName?: string; - /** - *Filters the list of jobs based on the time that the job was submitted for processing. - * Returns only jobs submitted after the specified time. Jobs are returned in descending order, - * newest to oldest.
- */ - SubmitTimeAfter?: Date; - /** *Filters the list based on job status. Returns only jobs with the specified status.
*/ @@ -3526,7 +3612,14 @@ export interface DocumentClassificationJobFilter { * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, * oldest to newest. */ - SubmitTimeBefore?: Date; + SubmitTimeBefore?: Date; + + /** + *Filters the list of jobs based on the time that the job was submitted for processing. + * Returns only jobs submitted after the specified time. Jobs are returned in descending order, + * newest to oldest.
+ */ + SubmitTimeAfter?: Date; } export namespace DocumentClassificationJobFilter { @@ -3609,17 +3702,17 @@ export namespace DocumentClassifierFilter { } export interface ListDocumentClassifiersRequest { - /** - *Identifies the next page of results to return.
- */ - NextToken?: string; - /** *Filters the jobs that are returned. You can filter jobs on their name, status, or the date * and time that they were submitted. You can only set one filter at a time.
*/ Filter?: DocumentClassifierFilter; + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; + /** *The maximum number of results to return in each page. The default is 100.
*/ @@ -3660,6 +3753,11 @@ export namespace ListDocumentClassifiersResponse { * information, see the operation. */ export interface DominantLanguageDetectionJobFilter { + /** + *Filters on the name of the job.
+ */ + JobName?: string; + /** *Filters the list of jobs based on job status. Returns only jobs with the specified * status.
@@ -3668,22 +3766,17 @@ export interface DominantLanguageDetectionJobFilter { /** *Filters the list of jobs based on the time that the job was submitted for processing. - * Returns only jobs submitted after the specified time. Jobs are returned in descending order, - * newest to oldest.
- */ - SubmitTimeAfter?: Date; - - /** - *Filters on the name of the job.
+ * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, + * oldest to newest. */ - JobName?: string; + SubmitTimeBefore?: Date; /** *Filters the list of jobs based on the time that the job was submitted for processing. - * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, - * oldest to newest.
+ * Returns only jobs submitted after the specified time. Jobs are returned in descending order, + * newest to oldest. */ - SubmitTimeBefore?: Date; + SubmitTimeAfter?: Date; } export namespace DominantLanguageDetectionJobFilter { @@ -3693,17 +3786,17 @@ export namespace DominantLanguageDetectionJobFilter { } export interface ListDominantLanguageDetectionJobsRequest { - /** - *Identifies the next page of results to return.
- */ - NextToken?: string; - /** *Filters that jobs that are returned. You can filter jobs on their name, status, or the * date and time that they were submitted. You can only set one filter at a time.
*/ Filter?: DominantLanguageDetectionJobFilter; + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; + /** *The maximum number of results to return in each page. The default is 100.
*/ @@ -3718,14 +3811,14 @@ export namespace ListDominantLanguageDetectionJobsRequest { export interface ListDominantLanguageDetectionJobsResponse { /** - *Identifies the next page of results to return.
+ *A list containing the properties of each job that is returned.
*/ - NextToken?: string; + DominantLanguageDetectionJobPropertiesList?: DominantLanguageDetectionJobProperties[]; /** - *A list containing the properties of each job that is returned.
+ *Identifies the next page of results to return.
*/ - DominantLanguageDetectionJobPropertiesList?: DominantLanguageDetectionJobProperties[]; + NextToken?: string; } export namespace ListDominantLanguageDetectionJobsResponse { @@ -3735,11 +3828,16 @@ export namespace ListDominantLanguageDetectionJobsResponse { } /** - *The filter used to determine which endpoints are are returned. You can filter jobs on + *
The filter used to determine which endpoints are returned. You can filter jobs on * their name, model, status, or the date and time that they were created. You can only set one * filter at a time.
*/ export interface EndpointFilter { + /** + *The Amazon Resource Number (ARN) of the model to which the endpoint is attached.
+ */ + ModelArn?: string; + /** *Specifies the status of the endpoint being returned. Possible values are: Creating, Ready, * Updating, Deleting, Failed.
@@ -3755,11 +3853,6 @@ export interface EndpointFilter { *Specifies a date after which the returned endpoint or endpoints were created.
*/ CreationTimeAfter?: Date; - - /** - *The Amazon Resource Number (ARN) of the model to which the endpoint is attached.
- */ - ModelArn?: string; } export namespace EndpointFilter { @@ -3770,9 +3863,11 @@ export namespace EndpointFilter { export interface ListEndpointsRequest { /** - *The maximum number of results to return in each page. The default is 100.
+ *Filters the endpoints that are returned. You can filter endpoints on their name, model, + * status, or the date and time that they were created. You can only set one filter at a time. + *
*/ - MaxResults?: number; + Filter?: EndpointFilter; /** *Identifies the next page of results to return.
@@ -3780,11 +3875,9 @@ export interface ListEndpointsRequest { NextToken?: string; /** - *Filters the endpoints that are returned. You can filter endpoints on their name, model, - * status, or the date and time that they were created. You can only set one filter at a time. - *
+ *The maximum number of results to return in each page. The default is 100.
*/ - Filter?: EndpointFilter; + MaxResults?: number; } export namespace ListEndpointsRequest { @@ -3830,17 +3923,17 @@ export interface EntitiesDetectionJobFilter { /** *Filters the list of jobs based on the time that the job was submitted for processing. - * Returns only jobs submitted after the specified time. Jobs are returned in descending order, - * newest to oldest.
+ * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, + * oldest to newest. */ - SubmitTimeAfter?: Date; + SubmitTimeBefore?: Date; /** *Filters the list of jobs based on the time that the job was submitted for processing. - * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, - * oldest to newest.
+ * Returns only jobs submitted after the specified time. Jobs are returned in descending order, + * newest to oldest. */ - SubmitTimeBefore?: Date; + SubmitTimeAfter?: Date; } export namespace EntitiesDetectionJobFilter { @@ -3850,17 +3943,17 @@ export namespace EntitiesDetectionJobFilter { } export interface ListEntitiesDetectionJobsRequest { - /** - *Identifies the next page of results to return.
- */ - NextToken?: string; - /** *Filters the jobs that are returned. You can filter jobs on their name, status, or the date * and time that they were submitted. You can only set one filter at a time.
*/ Filter?: EntitiesDetectionJobFilter; + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; + /** *The maximum number of results to return in each page. The default is 100.
*/ @@ -3896,6 +3989,11 @@ export namespace ListEntitiesDetectionJobsResponse { * filtering parameter in a request. For more information, see the operation./> */ export interface EntityRecognizerFilter { + /** + *The status of an entity recognizer.
+ */ + Status?: ModelStatus | string; + /** *Filters the list of entities based on the time that the list was submitted for processing. * Returns only jobs submitted before the specified time. Jobs are returned in descending order, @@ -3903,11 +4001,6 @@ export interface EntityRecognizerFilter { */ SubmitTimeBefore?: Date; - /** - *
The status of an entity recognizer.
- */ - Status?: ModelStatus | string; - /** *Filters the list of entities based on the time that the list was submitted for processing. * Returns only jobs submitted after the specified time. Jobs are returned in ascending order, @@ -3971,10 +4064,14 @@ export namespace ListEntityRecognizersResponse { } /** - *
Provides information for filtering a list of dominant language detection jobs. For more - * information, see the operation.
+ *Provides information for filtering a list of event detection jobs.
*/ -export interface KeyPhrasesDetectionJobFilter { +export interface EventsDetectionJobFilter { + /** + *Filters on the name of the events detection job.
+ */ + JobName?: string; + /** *Filters the list of jobs based on job status. Returns only jobs with the specified * status.
@@ -3988,11 +4085,85 @@ export interface KeyPhrasesDetectionJobFilter { */ SubmitTimeBefore?: Date; + /** + *Filters the list of jobs based on the time that the job was submitted for processing. + * Returns only jobs submitted after the specified time. Jobs are returned in descending order, + * newest to oldest.
+ */ + SubmitTimeAfter?: Date; +} + +export namespace EventsDetectionJobFilter { + export const filterSensitiveLog = (obj: EventsDetectionJobFilter): any => ({ + ...obj, + }); +} + +export interface ListEventsDetectionJobsRequest { + /** + *Filters the jobs that are returned. You can filter jobs on their name, status, or the date + * and time that they were submitted. You can only set one filter at a time.
+ */ + Filter?: EventsDetectionJobFilter; + + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; + + /** + *The maximum number of results to return in each page.
+ */ + MaxResults?: number; +} + +export namespace ListEventsDetectionJobsRequest { + export const filterSensitiveLog = (obj: ListEventsDetectionJobsRequest): any => ({ + ...obj, + }); +} + +export interface ListEventsDetectionJobsResponse { + /** + *A list containing the properties of each job that is returned.
+ */ + EventsDetectionJobPropertiesList?: EventsDetectionJobProperties[]; + + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; +} + +export namespace ListEventsDetectionJobsResponse { + export const filterSensitiveLog = (obj: ListEventsDetectionJobsResponse): any => ({ + ...obj, + }); +} + +/** + *Provides information for filtering a list of dominant language detection jobs. For more + * information, see the operation.
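Until the paginator added later in this change is wired up, the list operation can be paged by hand with NextToken; a sketch that narrows the listing to in-progress jobs (ListEventsDetectionJobsCommand is assumed from the shapes above):

import { ComprehendClient, ListEventsDetectionJobsCommand } from "@aws-sdk/client-comprehend";

const client = new ComprehendClient({ region: "us-east-1" });

async function listRunningEventsJobs(): Promise<void> {
  let NextToken: string | undefined;
  do {
    const page = await client.send(
      new ListEventsDetectionJobsCommand({
        Filter: { JobStatus: "IN_PROGRESS" },
        MaxResults: 100,
        NextToken,
      })
    );
    for (const job of page.EventsDetectionJobPropertiesList ?? []) {
      console.log(job.JobId, job.JobName, job.SubmitTime);
    }
    NextToken = page.NextToken;
  } while (NextToken);
}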
+ */ +export interface KeyPhrasesDetectionJobFilter { /** *Filters on the name of the job.
*/ JobName?: string; + /** + *Filters the list of jobs based on job status. Returns only jobs with the specified + * status.
+ */ + JobStatus?: JobStatus | string; + + /** + *Filters the list of jobs based on the time that the job was submitted for processing. + * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, + * oldest to newest.
+ */ + SubmitTimeBefore?: Date; + /** *Filters the list of jobs based on the time that the job was submitted for processing. * Returns only jobs submitted after the specified time. Jobs are returned in descending order, @@ -4008,17 +4179,17 @@ export namespace KeyPhrasesDetectionJobFilter { } export interface ListKeyPhrasesDetectionJobsRequest { - /** - *
Identifies the next page of results to return.
- */ - NextToken?: string; - /** *Filters the jobs that are returned. You can filter jobs on their name, status, or the date * and time that they were submitted. You can only set one filter at a time.
*/ Filter?: KeyPhrasesDetectionJobFilter; + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; + /** *The maximum number of results to return in each page. The default is 100.
*/ @@ -4053,13 +4224,6 @@ export namespace ListKeyPhrasesDetectionJobsResponse { *Provides information for filtering a list of PII entity detection jobs.
*/ export interface PiiEntitiesDetectionJobFilter { - /** - *Filters the list of jobs based on the time that the job was submitted for processing. - * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, - * oldest to newest.
- */ - SubmitTimeBefore?: Date; - /** *Filters on the name of the job.
*/ @@ -4071,6 +4235,13 @@ export interface PiiEntitiesDetectionJobFilter { */ JobStatus?: JobStatus | string; + /** + *Filters the list of jobs based on the time that the job was submitted for processing. + * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, + * oldest to newest.
+ */ + SubmitTimeBefore?: Date; + /** *Filters the list of jobs based on the time that the job was submitted for processing. * Returns only jobs submitted after the specified time. Jobs are returned in descending order, @@ -4086,17 +4257,17 @@ export namespace PiiEntitiesDetectionJobFilter { } export interface ListPiiEntitiesDetectionJobsRequest { - /** - *
Identifies the next page of results to return.
- */ - NextToken?: string; - /** *Filters the jobs that are returned. You can filter jobs on their name, status, or the date * and time that they were submitted. You can only set one filter at a time.
*/ Filter?: PiiEntitiesDetectionJobFilter; + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; + /** *The maximum number of results to return in each page.
*/ @@ -4111,14 +4282,14 @@ export namespace ListPiiEntitiesDetectionJobsRequest { export interface ListPiiEntitiesDetectionJobsResponse { /** - *Identifies the next page of results to return.
+ *A list containing the properties of each job that is returned.
*/ - NextToken?: string; + PiiEntitiesDetectionJobPropertiesList?: PiiEntitiesDetectionJobProperties[]; /** - *A list containing the properties of each job that is returned.
+ *Identifies the next page of results to return.
*/ - PiiEntitiesDetectionJobPropertiesList?: PiiEntitiesDetectionJobProperties[]; + NextToken?: string; } export namespace ListPiiEntitiesDetectionJobsResponse { @@ -4132,6 +4303,11 @@ export namespace ListPiiEntitiesDetectionJobsResponse { * information, see the operation. */ export interface SentimentDetectionJobFilter { + /** + *Filters on the name of the job.
+ */ + JobName?: string; + /** *Filters the list of jobs based on job status. Returns only jobs with the specified * status.
@@ -4151,11 +4327,6 @@ export interface SentimentDetectionJobFilter { * newest to oldest. */ SubmitTimeAfter?: Date; - - /** - *Filters on the name of the job.
- */ - JobName?: string; } export namespace SentimentDetectionJobFilter { @@ -4165,11 +4336,6 @@ export namespace SentimentDetectionJobFilter { } export interface ListSentimentDetectionJobsRequest { - /** - *The maximum number of results to return in each page. The default is 100.
- */ - MaxResults?: number; - /** *Filters the jobs that are returned. You can filter jobs on their name, status, or the date * and time that they were submitted. You can only set one filter at a time.
@@ -4180,6 +4346,11 @@ export interface ListSentimentDetectionJobsRequest { *Identifies the next page of results to return.
*/ NextToken?: string; + + /** + *The maximum number of results to return in each page. The default is 100.
+ */ + MaxResults?: number; } export namespace ListSentimentDetectionJobsRequest { @@ -4190,14 +4361,14 @@ export namespace ListSentimentDetectionJobsRequest { export interface ListSentimentDetectionJobsResponse { /** - *Identifies the next page of results to return.
+ *A list containing the properties of each job that is returned.
*/ - NextToken?: string; + SentimentDetectionJobPropertiesList?: SentimentDetectionJobProperties[]; /** - *A list containing the properties of each job that is returned.
+ *Identifies the next page of results to return.
*/ - SentimentDetectionJobPropertiesList?: SentimentDetectionJobProperties[]; + NextToken?: string; } export namespace ListSentimentDetectionJobsResponse { @@ -4220,7 +4391,13 @@ export namespace ListTagsForResourceRequest { }); } -export interface ListTagsForResourceResponse { +export interface ListTagsForResourceResponse { + /** + *The Amazon Resource Name (ARN) of the given Amazon Comprehend resource you are + * querying.
+ */ + ResourceArn?: string; + /** *Tags associated with the Amazon Comprehend resource being queried. A tag is a key-value * pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a tag with @@ -4228,12 +4405,6 @@ export interface ListTagsForResourceResponse { *
*/ Tags?: Tag[]; - - /** - *The Amazon Resource Name (ARN) of the given Amazon Comprehend resource you are - * querying.
- */ - ResourceArn?: string; } export namespace ListTagsForResourceResponse { @@ -4253,11 +4424,10 @@ export interface TopicsDetectionJobFilter { JobName?: string; /** - *Filters the list of jobs based on the time that the job was submitted for processing. - * Only returns jobs submitted after the specified time. Jobs are returned in ascending order, - * oldest to newest.
+ *Filters the list of topic detection jobs based on job status. Returns only jobs with + * the specified status.
*/ - SubmitTimeAfter?: Date; + JobStatus?: JobStatus | string; /** *Filters the list of jobs based on the time that the job was submitted for processing. @@ -4267,10 +4437,11 @@ export interface TopicsDetectionJobFilter { SubmitTimeBefore?: Date; /** - *
Filters the list of topic detection jobs based on job status. Returns only jobs with - * the specified status.
+ *Filters the list of jobs based on the time that the job was submitted for processing. + * Only returns jobs submitted after the specified time. Jobs are returned in ascending order, + * oldest to newest.
*/ - JobStatus?: JobStatus | string; + SubmitTimeAfter?: Date; } export namespace TopicsDetectionJobFilter { @@ -4280,6 +4451,12 @@ export namespace TopicsDetectionJobFilter { } export interface ListTopicsDetectionJobsRequest { + /** + *Filters the jobs that are returned. Jobs can be filtered on their name, status, or the + * date and time that they were submitted. You can set only one filter at a time.
+ */ + Filter?: TopicsDetectionJobFilter; + /** *Identifies the next page of results to return.
*/ @@ -4289,12 +4466,6 @@ export interface ListTopicsDetectionJobsRequest { *The maximum number of results to return in each page. The default is 100.
*/ MaxResults?: number; - - /** - *Filters the jobs that are returned. Jobs can be filtered on their name, status, or the - * date and time that they were submitted. You can set only one filter at a time.
- */ - Filter?: TopicsDetectionJobFilter; } export namespace ListTopicsDetectionJobsRequest { @@ -4322,6 +4493,27 @@ export namespace ListTopicsDetectionJobsResponse { } export interface StartDocumentClassificationJobRequest { + /** + *The identifier of the job.
+ */ + JobName?: string; + + /** + *The Amazon Resource Name (ARN) of the document classifier to use to process the + * job.
+ */ + DocumentClassifierArn: string | undefined; + + /** + *Specifies the format and location of the input data for the job.
+ */ + InputDataConfig: InputDataConfig | undefined; + + /** + *Specifies where to send the output files.
+ */ + OutputDataConfig: OutputDataConfig | undefined; + /** *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that * grants Amazon Comprehend read access to your input data.
@@ -4329,9 +4521,10 @@ export interface StartDocumentClassificationJobRequest { DataAccessRoleArn: string | undefined; /** - *Specifies the format and location of the input data for the job.
+ *A unique identifier for the request. If you do not set the client request token, Amazon + * Comprehend generates one.
*/ - InputDataConfig: InputDataConfig | undefined; + ClientRequestToken?: string; /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt @@ -4351,28 +4544,6 @@ export interface StartDocumentClassificationJobRequest { */ VolumeKmsKeyId?: string; - /** - *
The Amazon Resource Name (ARN) of the document classifier to use to process the - * job.
- */ - DocumentClassifierArn: string | undefined; - - /** - *Specifies where to send the output files.
- */ - OutputDataConfig: OutputDataConfig | undefined; - - /** - *A unique identifier for the request. If you do not set the client request token, Amazon - * Comprehend generates one.
- */ - ClientRequestToken?: string; - - /** - *The identifier of the job.
- */ - JobName?: string; - /** *Configuration parameters for an optional private Virtual Private Cloud (VPC) containing * the resources you are using for your document classification job. For more information, see @@ -4389,6 +4560,12 @@ export namespace StartDocumentClassificationJobRequest { } export interface StartDocumentClassificationJobResponse { + /** + *
The identifier generated for the job. To get the status of the job, use this identifier + * with the operation.
+ */ + JobId?: string; + /** *The status of the job:
*The identifier generated for the job. To get the status of the job, use this identifier - * with the operation.
- */ - JobId?: string; } export namespace StartDocumentClassificationJobResponse { @@ -4429,6 +4600,27 @@ export namespace StartDocumentClassificationJobResponse { } export interface StartDominantLanguageDetectionJobRequest { + /** + *Specifies the format and location of the input data for the job.
+ */ + InputDataConfig: InputDataConfig | undefined; + + /** + *Specifies where to send the output files.
+ */ + OutputDataConfig: OutputDataConfig | undefined; + + /** + *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
+ */ + DataAccessRoleArn: string | undefined; + + /** + *An identifier for the job.
+ */ + JobName?: string; + /** *A unique identifier for the request. If you do not set the client request token, Amazon * Comprehend generates one.
@@ -4453,33 +4645,12 @@ export interface StartDominantLanguageDetectionJobRequest { */ VolumeKmsKeyId?: string; - /** - *Specifies the format and location of the input data for the job.
- */ - InputDataConfig: InputDataConfig | undefined; - /** *Configuration parameters for an optional private Virtual Private Cloud (VPC) containing * the resources you are using for your dominant language detection job. For more information, * see Amazon VPC.
*/ VpcConfig?: VpcConfig; - - /** - *An identifier for the job.
- */ - JobName?: string; - - /** - *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that - * grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
- */ - DataAccessRoleArn: string | undefined; - - /** - *Specifies where to send the output files.
- */ - OutputDataConfig: OutputDataConfig | undefined; } export namespace StartDominantLanguageDetectionJobRequest { @@ -4523,18 +4694,14 @@ export namespace StartDominantLanguageDetectionJobResponse { export interface StartEntitiesDetectionJobRequest { /** - *The language of the input documents. All documents must be in the same language. You can - * specify any of the languages supported by Amazon Comprehend. If custom entities recognition is - * used, this parameter is ignored and the language used for training the model is used - * instead.
+ *Specifies the format and location of the input data for the job.
*/ - LanguageCode: LanguageCode | string | undefined; + InputDataConfig: InputDataConfig | undefined; /** - *A unique identifier for the request. If you don't set the client request token, Amazon - * Comprehend generates one.
+ *Specifies where to send the output files.
*/ - ClientRequestToken?: string; + OutputDataConfig: OutputDataConfig | undefined; /** *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that @@ -4543,9 +4710,9 @@ export interface StartEntitiesDetectionJobRequest { DataAccessRoleArn: string | undefined; /** - *
Specifies the format and location of the input data for the job.
+ *The identifier of the job.
*/ - InputDataConfig: InputDataConfig | undefined; + JobName?: string; /** *The Amazon Resource Name (ARN) that identifies the specific entity recognizer to be used @@ -4554,6 +4721,20 @@ export interface StartEntitiesDetectionJobRequest { */ EntityRecognizerArn?: string; + /** + *
The language of the input documents. All documents must be in the same language. You can + * specify any of the languages supported by Amazon Comprehend. If custom entities recognition is + * used, this parameter is ignored and the language used for training the model is used + * instead.
+ */ + LanguageCode: LanguageCode | string | undefined; + + /** + *A unique identifier for the request. If you don't set the client request token, Amazon + * Comprehend generates one.
+ */ + ClientRequestToken?: string; + /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt * data on the storage volume attached to the ML compute instance(s) that process the analysis @@ -4572,22 +4753,12 @@ export interface StartEntitiesDetectionJobRequest { */ VolumeKmsKeyId?: string; - /** - *
Specifies where to send the output files.
- */ - OutputDataConfig: OutputDataConfig | undefined; - /** *Configuration parameters for an optional private Virtual Private Cloud (VPC) containing * the resources you are using for your entity detection job. For more information, see Amazon * VPC.
*/ VpcConfig?: VpcConfig; - - /** - *The identifier of the job.
- */ - JobName?: string; } export namespace StartEntitiesDetectionJobRequest { @@ -4597,6 +4768,12 @@ export namespace StartEntitiesDetectionJobRequest { } export interface StartEntitiesDetectionJobResponse { + /** + *The identifier generated for the job. To get the status of job, use this identifier with + * the operation.
+ */ + JobId?: string; + /** *The status of the job.
*The identifier generated for the job. To get the status of job, use this identifier with - * the operation.
+ *Specifies the format and location of the input data for the job.
+ */ + InputDataConfig: InputDataConfig | undefined; + + /** + *Specifies where to send the output files.
+ */ + OutputDataConfig: OutputDataConfig | undefined; + + /** + *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data.
+ */ + DataAccessRoleArn: string | undefined; + + /** + *The identifier of the events detection job.
+ */ + JobName?: string; + + /** + *The language code of the input documents.
+ */ + LanguageCode: LanguageCode | string | undefined; + + /** + *<p>A unique identifier for the request. If you don't set the client request token, Amazon Comprehend generates one.</p>
+ */ + ClientRequestToken?: string; + + /** + *The types of events to detect in the input documents.
+ */ + TargetEventTypes: string[] | undefined; +} + +export namespace StartEventsDetectionJobRequest { + export const filterSensitiveLog = (obj: StartEventsDetectionJobRequest): any => ({ + ...obj, + }); +} + +export interface StartEventsDetectionJobResponse { + /** + *<p>The identifier generated for the events detection job.</p>
*/ JobId?: string; + + /** + *The status of the events detection job.
+ */ + JobStatus?: JobStatus | string; } -export namespace StartEntitiesDetectionJobResponse { - export const filterSensitiveLog = (obj: StartEntitiesDetectionJobResponse): any => ({ +export namespace StartEventsDetectionJobResponse { + export const filterSensitiveLog = (obj: StartEventsDetectionJobResponse): any => ({ ...obj, }); } export interface StartKeyPhrasesDetectionJobRequest { /** - *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt - * data on the storage volume attached to the ML compute instance(s) that process the analysis - * job. The VolumeKmsKeyId can be either of the following formats:
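Putting the new request shape together, starting an events job might look like the following (StartEventsDetectionJobCommand is assumed to be generated with these shapes; the bucket names, role ARN, and event types are illustrative placeholders):

import { ComprehendClient, StartEventsDetectionJobCommand } from "@aws-sdk/client-comprehend";

const client = new ComprehendClient({ region: "us-east-1" });

async function startEventsJob(): Promise<string | undefined> {
  const { JobId, JobStatus } = await client.send(
    new StartEventsDetectionJobCommand({
      JobName: "press-release-events",
      LanguageCode: "en",
      TargetEventTypes: ["BANKRUPTCY", "CORPORATE_MERGER"], // example event types
      InputDataConfig: { S3Uri: "s3://example-input-bucket/press-releases/", InputFormat: "ONE_DOC_PER_FILE" },
      OutputDataConfig: { S3Uri: "s3://example-output-bucket/events/" },
      DataAccessRoleArn: "arn:aws:iam::123456789012:role/ComprehendDataAccessRole",
    })
  );
  console.log(`Started ${JobId}: ${JobStatus}`);
  return JobId;
}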
- *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
- *
Amazon Resource Name (ARN) of a KMS Key:
- * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
- *
Specifies the format and location of the input data for the job.
*/ - VolumeKmsKeyId?: string; + InputDataConfig: InputDataConfig | undefined; + + /** + *Specifies where to send the output files.
+ */ + OutputDataConfig: OutputDataConfig | undefined; /** - *Specifies the format and location of the input data for the job.
+ *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
*/ - InputDataConfig: InputDataConfig | undefined; + DataAccessRoleArn: string | undefined; /** *The identifier of the job.
@@ -4677,6 +4904,24 @@ export interface StartKeyPhrasesDetectionJobRequest { */ ClientRequestToken?: string; + /** + *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt + * data on the storage volume attached to the ML compute instance(s) that process the analysis + * job. The VolumeKmsKeyId can be either of the following formats:
+ *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Amazon Resource Name (ARN) of a KMS Key:
+ * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Configuration parameters for an optional private Virtual Private Cloud (VPC) containing * the resources you are using for your key phrases detection job. For more information, see @@ -4684,17 +4929,6 @@ export interface StartKeyPhrasesDetectionJobRequest { * VPC.
*/ VpcConfig?: VpcConfig; - - /** - *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that - * grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
- */ - DataAccessRoleArn: string | undefined; - - /** - *Specifies where to send the output files.
- */ - OutputDataConfig: OutputDataConfig | undefined; } export namespace StartKeyPhrasesDetectionJobRequest { @@ -4704,6 +4938,12 @@ export namespace StartKeyPhrasesDetectionJobRequest { } export interface StartKeyPhrasesDetectionJobResponse { + /** + *The identifier generated for the job. To get the status of a job, use this identifier with + * the operation.
+ */ + JobId?: string; + /** *The status of the job.
*The identifier generated for the job. To get the status of a job, use this identifier with - * the operation.
- */ - JobId?: string; } export namespace StartKeyPhrasesDetectionJobResponse { @@ -4738,21 +4972,14 @@ export namespace StartKeyPhrasesDetectionJobResponse { export interface StartPiiEntitiesDetectionJobRequest { /** - *The language of the input documents.
- */ - LanguageCode: LanguageCode | string | undefined; - - /** - *A unique identifier for the request. If you don't set the client request token, Amazon - * Comprehend generates one.
+ *The input properties for a PII entities detection job.
*/ - ClientRequestToken?: string; + InputDataConfig: InputDataConfig | undefined; /** - *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that - * grants Amazon Comprehend read access to your input data.
+ *Provides configuration parameters for the output of PII entity detection jobs.
*/ - DataAccessRoleArn: string | undefined; + OutputDataConfig: OutputDataConfig | undefined; /** *Specifies whether the output provides the locations (offsets) of PII entities or a file in @@ -4761,27 +4988,34 @@ export interface StartPiiEntitiesDetectionJobRequest { Mode: PiiEntitiesDetectionMode | string | undefined; /** - *
The identifier of the job.
+ *Provides configuration parameters for PII entity redaction.
+ *This parameter is required if you set the Mode
parameter to
+ * ONLY_REDACTION
. In that case, you must provide a RedactionConfig
+ * definition that includes the PiiEntityTypes
parameter.
Provides configuration parameters for the output of PII entity detection jobs.
+ *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data.
*/ - OutputDataConfig: OutputDataConfig | undefined; + DataAccessRoleArn: string | undefined; /** - *Provides configuration parameters for PII entity redaction.
- *This parameter is required if you set the Mode
parameter to
- * ONLY_REDACTION
. In that case, you must provide a RedactionConfig
- * definition that includes the PiiEntityTypes
parameter.
The identifier of the job.
*/ - RedactionConfig?: RedactionConfig; + JobName?: string; /** - *The input properties for a PII entities detection job.
+ *The language of the input documents.
*/ - InputDataConfig: InputDataConfig | undefined; + LanguageCode: LanguageCode | string | undefined; + + /** + *A unique identifier for the request. If you don't set the client request token, Amazon + * Comprehend generates one.
+ */ + ClientRequestToken?: string; } export namespace StartPiiEntitiesDetectionJobRequest { @@ -4810,9 +5044,9 @@ export namespace StartPiiEntitiesDetectionJobResponse { export interface StartSentimentDetectionJobRequest { /** - *The identifier of the job.
+ *Specifies the format and location of the input data for the job.
*/ - JobName?: string; + InputDataConfig: InputDataConfig | undefined; /** *Specifies where to send the output files.
@@ -4820,9 +5054,27 @@ export interface StartSentimentDetectionJobRequest { OutputDataConfig: OutputDataConfig | undefined; /** - *Specifies the format and location of the input data for the job.
+ *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
*/ - InputDataConfig: InputDataConfig | undefined; + DataAccessRoleArn: string | undefined; + + /** + *The identifier of the job.
+ */ + JobName?: string; + + /** + *The language of the input documents. You can specify any of the primary languages + * supported by Amazon Comprehend. All documents must be in the same language.
+ */ + LanguageCode: LanguageCode | string | undefined; + + /** + *A unique identifier for the request. If you don't set the client request token, Amazon + * Comprehend generates one.
+ */ + ClientRequestToken?: string; /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt @@ -4842,30 +5094,12 @@ export interface StartSentimentDetectionJobRequest { */ VolumeKmsKeyId?: string; - /** - *
The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that - * grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
- */ - DataAccessRoleArn: string | undefined; - /** *Configuration parameters for an optional private Virtual Private Cloud (VPC) containing * the resources you are using for your sentiment detection job. For more information, see Amazon * VPC.
*/ VpcConfig?: VpcConfig; - - /** - *A unique identifier for the request. If you don't set the client request token, Amazon - * Comprehend generates one.
- */ - ClientRequestToken?: string; - - /** - *The language of the input documents. You can specify any of the primary languages - * supported by Amazon Comprehend. All documents must be in the same language.
- */ - LanguageCode: LanguageCode | string | undefined; } export namespace StartSentimentDetectionJobRequest { @@ -4909,22 +5143,39 @@ export namespace StartSentimentDetectionJobResponse { export interface StartTopicsDetectionJobRequest { /** - *Configuration parameters for an optional private Virtual Private Cloud (VPC) containing - * the resources you are using for your topic detection job. For more information, see Amazon - * VPC.
+ *Specifies the format and location of the input data for the job.
*/ - VpcConfig?: VpcConfig; + InputDataConfig: InputDataConfig | undefined; /** - *Specifies the format and location of the input data for the job.
+ *Specifies where to send the output files. The output is a compressed archive with two
+ * files, topic-terms.csv
that lists the terms associated with each topic, and
+ * doc-topics.csv
that lists the documents associated with each topic
The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role + * that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
+ */ + DataAccessRoleArn: string | undefined; /** *The identifier of the job.
*/ JobName?: string; + /** + *The number of topics to detect.
+ */ + NumberOfTopics?: number; + + /** + *A unique identifier for the request. If you do not set the client request token, Amazon + * Comprehend generates one.
+ */ + ClientRequestToken?: string; + /** *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt * data on the storage volume attached to the ML compute instance(s) that process the analysis @@ -4944,28 +5195,11 @@ export interface StartTopicsDetectionJobRequest { VolumeKmsKeyId?: string; /** - *
The number of topics to detect.
- */ - NumberOfTopics?: number; - - /** - *A unique identifier for the request. If you do not set the client request token, Amazon - * Comprehend generates one.
- */ - ClientRequestToken?: string; - - /** - *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role - * that grants Amazon Comprehend read access to your input data. For more information, see https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions.
- */ - DataAccessRoleArn: string | undefined; - - /** - *Specifies where to send the output files. The output is a compressed archive with two
- * files, topic-terms.csv
that lists the terms associated with each topic, and
- * doc-topics.csv
that lists the documents associated with each topic
Configuration parameters for an optional private Virtual Private Cloud (VPC) containing + * the resources you are using for your topic detection job. For more information, see Amazon + * VPC.
*/ - OutputDataConfig: OutputDataConfig | undefined; + VpcConfig?: VpcConfig; } export namespace StartTopicsDetectionJobRequest { @@ -4975,6 +5209,12 @@ export namespace StartTopicsDetectionJobRequest { } export interface StartTopicsDetectionJobResponse { + /** + *The identifier generated for the job. To get the status of the job, use this identifier
+ * with the DescribeTopicDetectionJob
operation.
The status of the job:
*The identifier generated for the job. To get the status of the job, use this identifier
- * with the DescribeTopicDetectionJob
operation.
The identifier of the entities detection job to stop.
+ */ + JobId?: string; + /** *Either STOP_REQUESTED
if the job is currently running, or
* STOPPED
if the job was previously stopped with the
* StopEntitiesDetectionJob
operation.
The identifier of the entities detection job to stop.
+ *The identifier of the events detection job to stop.
+ */ + JobId: string | undefined; +} + +export namespace StopEventsDetectionJobRequest { + export const filterSensitiveLog = (obj: StopEventsDetectionJobRequest): any => ({ + ...obj, + }); +} + +export interface StopEventsDetectionJobResponse { + /** + *The identifier of the events detection job to stop.
*/ JobId?: string; + + /** + *The status of the events detection job.
+ */ + JobStatus?: JobStatus | string; } -export namespace StopEntitiesDetectionJobResponse { - export const filterSensitiveLog = (obj: StopEntitiesDetectionJobResponse): any => ({ +export namespace StopEventsDetectionJobResponse { + export const filterSensitiveLog = (obj: StopEventsDetectionJobResponse): any => ({ ...obj, }); } @@ -5123,14 +5388,14 @@ export namespace StopPiiEntitiesDetectionJobRequest { export interface StopPiiEntitiesDetectionJobResponse { /** - *The status of the PII entities detection job.
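// Illustrative sketch (not part of the generated client): one way the new events
// detection job surface added in this change might be exercised. The paginator export
// name (paginateListEventsDetectionJobs) and StopEventsDetectionJobCommand are assumed
// from the SDK's usual naming conventions; the region and JobId values are hypothetical.
import {
  ComprehendClient,
  StopEventsDetectionJobCommand,
  paginateListEventsDetectionJobs,
} from "@aws-sdk/client-comprehend";

async function reviewEventsDetectionJobs(): Promise<void> {
  const client = new ComprehendClient({ region: "us-east-1" });

  // Walk every page of ListEventsDetectionJobs via the generated paginator.
  for await (const page of paginateListEventsDetectionJobs({ client }, {})) {
    console.log(page); // see ListEventsDetectionJobsCommandOutput for the page shape
  }

  // Stop a running job; the response echoes the JobId and the resulting JobStatus.
  const stopped = await client.send(
    new StopEventsDetectionJobCommand({ JobId: "example-events-job-id" })
  );
  console.log(stopped.JobId, stopped.JobStatus);
}

void reviewEventsDetectionJobs();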
+ *The identifier of the PII entities detection job to stop.
*/ - JobStatus?: JobStatus | string; + JobId?: string; /** - *The identifier of the PII entities detection job to stop.
+ *The status of the PII entities detection job.
*/ - JobId?: string; + JobStatus?: JobStatus | string; } export namespace StopPiiEntitiesDetectionJobResponse { @@ -5153,17 +5418,17 @@ export namespace StopSentimentDetectionJobRequest { } export interface StopSentimentDetectionJobResponse { + /** + *The identifier of the sentiment detection job to stop.
+ */ + JobId?: string; + /** *Either STOP_REQUESTED
if the job is currently running, or
* STOPPED
if the job was previously stopped with the
* StopSentimentDetectionJob
operation.
The identifier of the sentiment detection job to stop.
- */ - JobId?: string; } export namespace StopSentimentDetectionJobResponse { @@ -5307,17 +5572,17 @@ export namespace UntagResourceResponse { } export interface UpdateEndpointRequest { + /** + *The Amazon Resource Number (ARN) of the endpoint being updated.
+ */ + EndpointArn: string | undefined; + /** *The desired number of inference units to be used by the model using this endpoint. * * Each inference unit represents of a throughput of 100 characters per second.
*/ DesiredInferenceUnits: number | undefined; - - /** - *The Amazon Resource Number (ARN) of the endpoint being updated.
- */ - EndpointArn: string | undefined; } export namespace UpdateEndpointRequest { diff --git a/clients/client-comprehend/pagination/ListEventsDetectionJobsPaginator.ts b/clients/client-comprehend/pagination/ListEventsDetectionJobsPaginator.ts new file mode 100644 index 000000000000..511ff1f91f75 --- /dev/null +++ b/clients/client-comprehend/pagination/ListEventsDetectionJobsPaginator.ts @@ -0,0 +1,57 @@ +import { Comprehend } from "../Comprehend"; +import { ComprehendClient } from "../ComprehendClient"; +import { + ListEventsDetectionJobsCommand, + ListEventsDetectionJobsCommandInput, + ListEventsDetectionJobsCommandOutput, +} from "../commands/ListEventsDetectionJobsCommand"; +import { ComprehendPaginationConfiguration } from "./Interfaces"; +import { Paginator } from "@aws-sdk/types"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: ComprehendClient, + input: ListEventsDetectionJobsCommandInput, + ...args: any +): Promise+ * This operation allows you to perform batch reads and writes on data stored in DynamoDB, using PartiQL. + *
+ */ + public batchExecuteStatement( + args: BatchExecuteStatementCommandInput, + options?: __HttpHandlerOptions + ): PromiseThe BatchGetItem
operation returns the attributes of one or more items from one or
* more tables. You identify requested items by primary key.
Returns information about the status of Kinesis streaming.
+ */ + public describeKinesisStreamingDestination( + args: DescribeKinesisStreamingDestinationCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns the current provisioned-capacity quotas for your AWS account in a Region, both * for the Region as a whole and for any one DynamoDB table that you create there.
@@ -1111,6 +1207,142 @@ export class DynamoDB extends DynamoDBClient { } } + /** + *Stops replication from the DynamoDB table to the Kinesis data stream. This is done + * without deleting either of the resources.
+ */ + public disableKinesisStreamingDestination( + args: DisableKinesisStreamingDestinationCommandInput, + options?: __HttpHandlerOptions + ): PromiseStarts table data replication to the specified Kinesis data stream at a timestamp chosen + * during the enable workflow. If this operation doesn't return results immediately, use + * DescribeKinesisStreamingDestination to check if streaming to the Kinesis data stream is + * ACTIVE.
+ */ + public enableKinesisStreamingDestination( + args: EnableKinesisStreamingDestinationCommandInput, + options?: __HttpHandlerOptions + ): Promise+ * This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL. + *
+ */ + public executeStatement( + args: ExecuteStatementCommandInput, + options?: __HttpHandlerOptions + ): Promise+ * This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL. + *
+ */ + public executeTransaction( + args: ExecuteTransactionCommandInput, + options?: __HttpHandlerOptions + ): PromiseExports table data to an S3 bucket. The table must have point in time recovery * enabled, and you can export data from any time within the point in time recovery diff --git a/clients/client-dynamodb/DynamoDBClient.ts b/clients/client-dynamodb/DynamoDBClient.ts index 6b4a95c3fd38..3d4cceb1b3e1 100644 --- a/clients/client-dynamodb/DynamoDBClient.ts +++ b/clients/client-dynamodb/DynamoDBClient.ts @@ -1,3 +1,7 @@ +import { + BatchExecuteStatementCommandInput, + BatchExecuteStatementCommandOutput, +} from "./commands/BatchExecuteStatementCommand"; import { BatchGetItemCommandInput, BatchGetItemCommandOutput } from "./commands/BatchGetItemCommand"; import { BatchWriteItemCommandInput, BatchWriteItemCommandOutput } from "./commands/BatchWriteItemCommand"; import { CreateBackupCommandInput, CreateBackupCommandOutput } from "./commands/CreateBackupCommand"; @@ -25,6 +29,10 @@ import { DescribeGlobalTableSettingsCommandInput, DescribeGlobalTableSettingsCommandOutput, } from "./commands/DescribeGlobalTableSettingsCommand"; +import { + DescribeKinesisStreamingDestinationCommandInput, + DescribeKinesisStreamingDestinationCommandOutput, +} from "./commands/DescribeKinesisStreamingDestinationCommand"; import { DescribeLimitsCommandInput, DescribeLimitsCommandOutput } from "./commands/DescribeLimitsCommand"; import { DescribeTableCommandInput, DescribeTableCommandOutput } from "./commands/DescribeTableCommand"; import { @@ -32,6 +40,16 @@ import { DescribeTableReplicaAutoScalingCommandOutput, } from "./commands/DescribeTableReplicaAutoScalingCommand"; import { DescribeTimeToLiveCommandInput, DescribeTimeToLiveCommandOutput } from "./commands/DescribeTimeToLiveCommand"; +import { + DisableKinesisStreamingDestinationCommandInput, + DisableKinesisStreamingDestinationCommandOutput, +} from "./commands/DisableKinesisStreamingDestinationCommand"; +import { + EnableKinesisStreamingDestinationCommandInput, + EnableKinesisStreamingDestinationCommandOutput, +} from "./commands/EnableKinesisStreamingDestinationCommand"; +import { ExecuteStatementCommandInput, ExecuteStatementCommandOutput } from "./commands/ExecuteStatementCommand"; +import { ExecuteTransactionCommandInput, ExecuteTransactionCommandOutput } from "./commands/ExecuteTransactionCommand"; import { ExportTableToPointInTimeCommandInput, ExportTableToPointInTimeCommandOutput, @@ -131,6 +149,7 @@ import { } from "@aws-sdk/types"; export type ServiceInputTypes = + | BatchExecuteStatementCommandInput | BatchGetItemCommandInput | BatchWriteItemCommandInput | CreateBackupCommandInput @@ -146,10 +165,15 @@ export type ServiceInputTypes = | DescribeExportCommandInput | DescribeGlobalTableCommandInput | DescribeGlobalTableSettingsCommandInput + | DescribeKinesisStreamingDestinationCommandInput | DescribeLimitsCommandInput | DescribeTableCommandInput | DescribeTableReplicaAutoScalingCommandInput | DescribeTimeToLiveCommandInput + | DisableKinesisStreamingDestinationCommandInput + | EnableKinesisStreamingDestinationCommandInput + | ExecuteStatementCommandInput + | ExecuteTransactionCommandInput | ExportTableToPointInTimeCommandInput | GetItemCommandInput | ListBackupsCommandInput @@ -177,6 +201,7 @@ export type ServiceInputTypes = | UpdateTimeToLiveCommandInput; export type ServiceOutputTypes = + | BatchExecuteStatementCommandOutput | BatchGetItemCommandOutput | BatchWriteItemCommandOutput | CreateBackupCommandOutput @@ -192,10 +217,15 @@ export 
type ServiceOutputTypes = | DescribeExportCommandOutput | DescribeGlobalTableCommandOutput | DescribeGlobalTableSettingsCommandOutput + | DescribeKinesisStreamingDestinationCommandOutput | DescribeLimitsCommandOutput | DescribeTableCommandOutput | DescribeTableReplicaAutoScalingCommandOutput | DescribeTimeToLiveCommandOutput + | DisableKinesisStreamingDestinationCommandOutput + | EnableKinesisStreamingDestinationCommandOutput + | ExecuteStatementCommandOutput + | ExecuteTransactionCommandOutput | ExportTableToPointInTimeCommandOutput | GetItemCommandOutput | ListBackupsCommandOutput diff --git a/clients/client-dynamodb/commands/BatchExecuteStatementCommand.ts b/clients/client-dynamodb/commands/BatchExecuteStatementCommand.ts new file mode 100644 index 000000000000..210c445d3a8a --- /dev/null +++ b/clients/client-dynamodb/commands/BatchExecuteStatementCommand.ts @@ -0,0 +1,90 @@ +import { DynamoDBClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DynamoDBClient"; +import { BatchExecuteStatementInput, BatchExecuteStatementOutput } from "../models/models_0"; +import { + deserializeAws_json1_0BatchExecuteStatementCommand, + serializeAws_json1_0BatchExecuteStatementCommand, +} from "../protocols/Aws_json1_0"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export type BatchExecuteStatementCommandInput = BatchExecuteStatementInput; +export type BatchExecuteStatementCommandOutput = BatchExecuteStatementOutput & __MetadataBearer; + +/** + *
+ * This operation allows you to perform batch reads and writes on data stored in DynamoDB, using PartiQL. + *
+ */ +export class BatchExecuteStatementCommand extends $Command< + BatchExecuteStatementCommandInput, + BatchExecuteStatementCommandOutput, + DynamoDBClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: BatchExecuteStatementCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns information about the status of Kinesis streaming.
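// Illustrative sketch (not part of the generated client): batching several PartiQL
// statements through the new BatchExecuteStatementCommand. The table, attribute names,
// and region are hypothetical; the Statements/Responses shapes are the ones added here.
import { DynamoDBClient, BatchExecuteStatementCommand } from "@aws-sdk/client-dynamodb";

async function batchReadSongs(): Promise<void> {
  const client = new DynamoDBClient({ region: "us-east-1" });

  const output = await client.send(
    new BatchExecuteStatementCommand({
      Statements: [
        { Statement: `SELECT * FROM "Music" WHERE "Artist" = ?`, Parameters: [{ S: "Acme Band" }] },
        { Statement: `SELECT * FROM "Music" WHERE "Artist" = ?`, Parameters: [{ S: "No One You Know" }] },
      ],
    })
  );

  for (const response of output.Responses ?? []) {
    if (response.Error) {
      // Per-statement failures come back inline via BatchStatementError, not as a thrown exception.
      console.error(response.TableName, response.Error.Code, response.Error.Message);
    } else {
      console.log(response.Item);
    }
  }
}

void batchReadSongs();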
+ */ +export class DescribeKinesisStreamingDestinationCommand extends $Command< + DescribeKinesisStreamingDestinationCommandInput, + DescribeKinesisStreamingDestinationCommandOutput, + DynamoDBClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeKinesisStreamingDestinationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackStops replication from the DynamoDB table to the Kinesis data stream. This is done + * without deleting either of the resources.
+ */ +export class DisableKinesisStreamingDestinationCommand extends $Command< + DisableKinesisStreamingDestinationCommandInput, + DisableKinesisStreamingDestinationCommandOutput, + DynamoDBClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisableKinesisStreamingDestinationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackStarts table data replication to the specified Kinesis data stream at a timestamp chosen + * during the enable workflow. If this operation doesn't return results immediately, use + * DescribeKinesisStreamingDestination to check if streaming to the Kinesis data stream is + * ACTIVE.
+ */ +export class EnableKinesisStreamingDestinationCommand extends $Command< + EnableKinesisStreamingDestinationCommandInput, + EnableKinesisStreamingDestinationCommandOutput, + DynamoDBClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: EnableKinesisStreamingDestinationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack+ * This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL. + *
+ */ +export class ExecuteStatementCommand extends $Command< + ExecuteStatementCommandInput, + ExecuteStatementCommandOutput, + DynamoDBClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ExecuteStatementCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack+ * This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL. + *
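// Illustrative sketch (not part of the generated client): paging through a PartiQL
// SELECT with the new ExecuteStatementCommand, round-tripping the NextToken described
// on ExecuteStatementInput/ExecuteStatementOutput in this change. The table, attribute
// names, and region are hypothetical.
import { DynamoDBClient, ExecuteStatementCommand } from "@aws-sdk/client-dynamodb";

async function queryByArtist(artist: string): Promise<void> {
  const client = new DynamoDBClient({ region: "us-east-1" });
  let nextToken: string | undefined;

  do {
    const page = await client.send(
      new ExecuteStatementCommand({
        Statement: `SELECT * FROM "Music" WHERE "Artist" = ?`,
        Parameters: [{ S: artist }],
        ConsistentRead: true,
        NextToken: nextToken,
      })
    );
    for (const item of page.Items ?? []) {
      console.log(item);
    }
    nextToken = page.NextToken;
  } while (nextToken !== undefined);
}

void queryByArtist("Acme Band");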
+ */ +export class ExecuteTransactionCommand extends $Command< + ExecuteTransactionCommandInput, + ExecuteTransactionCommandOutput, + DynamoDBClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ExecuteTransactionCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack+ * An error associated with a statement in a PartiQL batch that was run. + *
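// Illustrative sketch (not part of the generated client): a transactional PartiQL write
// with the new ExecuteTransactionCommand. TransactStatements and ClientRequestToken come
// from the ExecuteTransactionInput shape added here; the table, key attribute, and
// statement text are hypothetical.
import { DynamoDBClient, ExecuteTransactionCommand } from "@aws-sdk/client-dynamodb";

async function transferCredits(): Promise<void> {
  const client = new DynamoDBClient({ region: "us-east-1" });

  await client.send(
    new ExecuteTransactionCommand({
      // Reusing the same token makes retries of this transaction idempotent.
      ClientRequestToken: "example-transfer-0001",
      TransactStatements: [
        {
          Statement: `UPDATE "Accounts" SET Balance = Balance - ? WHERE AccountId = ?`,
          Parameters: [{ N: "10" }, { S: "alice" }],
        },
        {
          Statement: `UPDATE "Accounts" SET Balance = Balance + ? WHERE AccountId = ?`,
          Parameters: [{ N: "10" }, { S: "bob" }],
        },
      ],
    })
  );
}

void transferCredits();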
+ */ +export interface BatchStatementError { + /** + *+ * The error code associated with the failed PartiQL batch statement. + *
+ */ + Code?: BatchStatementErrorCodeEnum | string; + + /** + *+ * The error message associated with the PartiQL batch response. + *
+ */ + Message?: string; +} + +export namespace BatchStatementError { + export const filterSensitiveLog = (obj: BatchStatementError): any => ({ + ...obj, + }); +} + +/** + *An error occurred on the server side.
+ */ +export interface InternalServerError extends __SmithyException, $MetadataBearer { + name: "InternalServerError"; + $fault: "server"; + /** + *The server encountered an internal error trying to fulfill the request.
+ */ + message?: string; +} + +export namespace InternalServerError { + export const filterSensitiveLog = (obj: InternalServerError): any => ({ + ...obj, + }); +} + +/** + *Throughput exceeds the current throughput quota for your account. Please contact AWS Support at AWS Support to request a quota increase.
+ */ +export interface RequestLimitExceeded extends __SmithyException, $MetadataBearer { + name: "RequestLimitExceeded"; + $fault: "client"; + message?: string; +} + +export namespace RequestLimitExceeded { + export const filterSensitiveLog = (obj: RequestLimitExceeded): any => ({ + ...obj, + }); +} + export type ReturnConsumedCapacity = "INDEXES" | "NONE" | "TOTAL"; /** @@ -1025,24 +1099,6 @@ export namespace ConsumedCapacity { }); } -/** - *An error occurred on the server side.
- */ -export interface InternalServerError extends __SmithyException, $MetadataBearer { - name: "InternalServerError"; - $fault: "server"; - /** - *The server encountered an internal error trying to fulfill the request.
- */ - message?: string; -} - -export namespace InternalServerError { - export const filterSensitiveLog = (obj: InternalServerError): any => ({ - ...obj, - }); -} - export interface InvalidEndpointException extends __SmithyException, $MetadataBearer { name: "InvalidEndpointException"; $fault: "client"; @@ -1077,21 +1133,6 @@ export namespace ProvisionedThroughputExceededException { }); } -/** - *Throughput exceeds the current throughput quota for your account. Please contact AWS Support at AWS Support to request a quota increase.
- */ -export interface RequestLimitExceeded extends __SmithyException, $MetadataBearer { - name: "RequestLimitExceeded"; - $fault: "client"; - message?: string; -} - -export namespace RequestLimitExceeded { - export const filterSensitiveLog = (obj: RequestLimitExceeded): any => ({ - ...obj, - }); -} - /** *The operation tried to access a nonexistent table or index. The resource might not be specified
* correctly, or its status might not be ACTIVE
.
The name of the table being described.
+ */ + TableName: string | undefined; +} + +export namespace DescribeKinesisStreamingDestinationInput { + export const filterSensitiveLog = (obj: DescribeKinesisStreamingDestinationInput): any => ({ + ...obj, + }); +} + +export type DestinationStatus = "ACTIVE" | "DISABLED" | "DISABLING" | "ENABLE_FAILED" | "ENABLING"; + +/** + *Describes a Kinesis data stream destination.
+ */ +export interface KinesisDataStreamDestination { + /** + *The ARN for a specific Kinesis data stream.
+ */ + StreamArn?: string; + + /** + *The current status of replication.
+ */ + DestinationStatus?: DestinationStatus | string; + + /** + *The human-readable string that corresponds to the replica status.
+ */ + DestinationStatusDescription?: string; +} + +export namespace KinesisDataStreamDestination { + export const filterSensitiveLog = (obj: KinesisDataStreamDestination): any => ({ + ...obj, + }); +} + +export interface DescribeKinesisStreamingDestinationOutput { + /** + *The name of the table being described.
+ */ + TableName?: string; + + /** + *The list of replica structures for the table being described.
+ */ + KinesisDataStreamDestinations?: KinesisDataStreamDestination[]; +} + +export namespace DescribeKinesisStreamingDestinationOutput { + export const filterSensitiveLog = (obj: DescribeKinesisStreamingDestinationOutput): any => ({ + ...obj, + }); +} + /** *Represents the input of a DescribeLimits
operation. Has no content.
The name of the DynamoDB table.
+ */ + TableName: string | undefined; + + /** + *The ARN for a Kinesis data stream.
+ */ + StreamArn: string | undefined; +} + +export namespace KinesisStreamingDestinationInput { + export const filterSensitiveLog = (obj: KinesisStreamingDestinationInput): any => ({ + ...obj, + }); +} + +export interface KinesisStreamingDestinationOutput { + /** + *The name of the table being modified.
+ */ + TableName?: string; + + /** + *The ARN for the specific Kinesis data stream.
+ */ + StreamArn?: string; + + /** + *The current status of the replication.
+ */ + DestinationStatus?: DestinationStatus | string; +} + +export namespace KinesisStreamingDestinationOutput { + export const filterSensitiveLog = (obj: KinesisStreamingDestinationOutput): any => ({ + ...obj, + }); +} + +/** + *+ * There was an attempt to insert an item with the same primary key as an item that already exists in the DynamoDB table. + *
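// Illustrative sketch (not part of the generated client): enabling Kinesis Data Streams
// replication for a table with the new EnableKinesisStreamingDestinationCommand, then
// polling DescribeKinesisStreamingDestination until the destination reports ACTIVE, as
// the EnableKinesisStreamingDestination documentation in this change suggests. The table
// name, stream ARN, and region are hypothetical.
import {
  DynamoDBClient,
  EnableKinesisStreamingDestinationCommand,
  DescribeKinesisStreamingDestinationCommand,
} from "@aws-sdk/client-dynamodb";

async function enableStreaming(): Promise<void> {
  const client = new DynamoDBClient({ region: "us-east-1" });
  const TableName = "Music";
  const StreamArn = "arn:aws:kinesis:us-east-1:123456789012:stream/music-changes";

  const enabled = await client.send(
    new EnableKinesisStreamingDestinationCommand({ TableName, StreamArn })
  );
  console.log("requested, current status:", enabled.DestinationStatus); // typically ENABLING

  let active = false;
  while (!active) {
    const described = await client.send(
      new DescribeKinesisStreamingDestinationCommand({ TableName })
    );
    active = (described.KinesisDataStreamDestinations ?? []).some(
      (destination) => destination.StreamArn === StreamArn && destination.DestinationStatus === "ACTIVE"
    );
    if (!active) await new Promise((resolve) => setTimeout(resolve, 5000));
  }
}

void enableStreaming();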
+ */ +export interface DuplicateItemException extends __SmithyException, $MetadataBearer { + name: "DuplicateItemException"; + $fault: "client"; + message?: string; +} + +export namespace DuplicateItemException { + export const filterSensitiveLog = (obj: DuplicateItemException): any => ({ + ...obj, + }); +} + +/** + *DynamoDB rejected the request because you retried a request with a different payload but + * with an idempotent token that was already used.
+ */ +export interface IdempotentParameterMismatchException extends __SmithyException, $MetadataBearer { + name: "IdempotentParameterMismatchException"; + $fault: "client"; + Message?: string; +} + +export namespace IdempotentParameterMismatchException { + export const filterSensitiveLog = (obj: IdempotentParameterMismatchException): any => ({ + ...obj, + }); +} + +/** + *The transaction with the given request token is already in progress.
+ */ +export interface TransactionInProgressException extends __SmithyException, $MetadataBearer { + name: "TransactionInProgressException"; + $fault: "client"; + Message?: string; +} + +export namespace TransactionInProgressException { + export const filterSensitiveLog = (obj: TransactionInProgressException): any => ({ + ...obj, + }); +} + /** *There was a conflict when writing to the specified S3 bucket.
*/ @@ -4504,37 +4693,6 @@ export namespace TagResourceInput { }); } -/** - *DynamoDB rejected the request because you retried a request with a different payload but - * with an idempotent token that was already used.
- */ -export interface IdempotentParameterMismatchException extends __SmithyException, $MetadataBearer { - name: "IdempotentParameterMismatchException"; - $fault: "client"; - Message?: string; -} - -export namespace IdempotentParameterMismatchException { - export const filterSensitiveLog = (obj: IdempotentParameterMismatchException): any => ({ - ...obj, - }); -} - -/** - *The transaction with the given request token is already in progress.
- */ -export interface TransactionInProgressException extends __SmithyException, $MetadataBearer { - name: "TransactionInProgressException"; - $fault: "client"; - Message?: string; -} - -export namespace TransactionInProgressException { - export const filterSensitiveLog = (obj: TransactionInProgressException): any => ({ - ...obj, - }); -} - export interface UntagResourceInput { /** *The DynamoDB resource that the tags will be removed from. This value is an Amazon @@ -5364,14 +5522,39 @@ export namespace UpdateTimeToLiveOutput { *
For more information, see Data Types in the * Amazon DynamoDB Developer Guide.
*/ -export interface AttributeValue { +export type AttributeValue = + | AttributeValue.BMember + | AttributeValue.BOOLMember + | AttributeValue.BSMember + | AttributeValue.LMember + | AttributeValue.MMember + | AttributeValue.NMember + | AttributeValue.NSMember + | AttributeValue.NULLMember + | AttributeValue.SMember + | AttributeValue.SSMember + | AttributeValue.$UnknownMember; + +export namespace AttributeValue { /** *An attribute of type String. For example:
*
* "S": "Hello"
*
An attribute of type Number. For example:
@@ -5380,7 +5563,19 @@ export interface AttributeValue { * *Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
*/ - N?: string; + export interface NMember { + S?: never; + N: string; + B?: never; + SS?: never; + NS?: never; + BS?: never; + M?: never; + L?: never; + NULL?: never; + BOOL?: never; + $unknown?: never; + } /** *An attribute of type Binary. For example:
@@ -5388,7 +5583,19 @@ export interface AttributeValue { *"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"
*
*/
- B?: Uint8Array;
+ export interface BMember {
+ S?: never;
+ N?: never;
+ B: Uint8Array;
+ SS?: never;
+ NS?: never;
+ BS?: never;
+ M?: never;
+ L?: never;
+ NULL?: never;
+ BOOL?: never;
+ $unknown?: never;
+ }
/**
* An attribute of type String Set. For example:
@@ -5396,7 +5603,19 @@ export interface AttributeValue { *"SS": ["Giraffe", "Hippo" ,"Zebra"]
*
*/
- SS?: string[];
+ export interface SSMember {
+ S?: never;
+ N?: never;
+ B?: never;
+ SS: string[];
+ NS?: never;
+ BS?: never;
+ M?: never;
+ L?: never;
+ NULL?: never;
+ BOOL?: never;
+ $unknown?: never;
+ }
/**
* An attribute of type Number Set. For example:
@@ -5405,7 +5624,19 @@ export interface AttributeValue { * *Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
*/ - NS?: string[]; + export interface NSMember { + S?: never; + N?: never; + B?: never; + SS?: never; + NS: string[]; + BS?: never; + M?: never; + L?: never; + NULL?: never; + BOOL?: never; + $unknown?: never; + } /** *An attribute of type Binary Set. For example:
@@ -5413,7 +5644,19 @@ export interface AttributeValue { *"BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]
*
*/
- BS?: Uint8Array[];
+ export interface BSMember {
+ S?: never;
+ N?: never;
+ B?: never;
+ SS?: never;
+ NS?: never;
+ BS: Uint8Array[];
+ M?: never;
+ L?: never;
+ NULL?: never;
+ BOOL?: never;
+ $unknown?: never;
+ }
/**
* An attribute of type Map. For example:
@@ -5421,7 +5664,19 @@ export interface AttributeValue { *"M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}
*
*/
- M?: { [key: string]: AttributeValue };
+ export interface MMember {
+ S?: never;
+ N?: never;
+ B?: never;
+ SS?: never;
+ NS?: never;
+ BS?: never;
+ M: { [key: string]: AttributeValue };
+ L?: never;
+ NULL?: never;
+ BOOL?: never;
+ $unknown?: never;
+ }
/**
* An attribute of type List. For example:
@@ -5429,7 +5684,19 @@ export interface AttributeValue { *"L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N", "3.14159"}]
*
*/
- L?: AttributeValue[];
+ export interface LMember {
+ S?: never;
+ N?: never;
+ B?: never;
+ SS?: never;
+ NS?: never;
+ BS?: never;
+ M?: never;
+ L: AttributeValue[];
+ NULL?: never;
+ BOOL?: never;
+ $unknown?: never;
+ }
/**
* An attribute of type Null. For example:
@@ -5437,7 +5704,19 @@ export interface AttributeValue { *"NULL": true
*
*/
- NULL?: boolean;
+ export interface NULLMember {
+ S?: never;
+ N?: never;
+ B?: never;
+ SS?: never;
+ NS?: never;
+ BS?: never;
+ M?: never;
+ L?: never;
+ NULL: boolean;
+ BOOL?: never;
+ $unknown?: never;
+ }
/**
* An attribute of type Boolean. For example:
@@ -5445,13 +5724,84 @@ export interface AttributeValue { *"BOOL": true
*
*/
- BOOL?: boolean;
-}
-
-export namespace AttributeValue {
- export const filterSensitiveLog = (obj: AttributeValue): any => ({
- ...obj,
- });
+ export interface BOOLMember {
+ S?: never;
+ N?: never;
+ B?: never;
+ SS?: never;
+ NS?: never;
+ BS?: never;
+ M?: never;
+ L?: never;
+ NULL?: never;
+ BOOL: boolean;
+ $unknown?: never;
+ }
+
+ export interface $UnknownMember {
+ S?: never;
+ N?: never;
+ B?: never;
+ SS?: never;
+ NS?: never;
+ BS?: never;
+ M?: never;
+ L?: never;
+ NULL?: never;
+ BOOL?: never;
+ $unknown: [string, any];
+ }
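// Illustrative sketch (not part of the generated model file): with this change
// AttributeValue becomes a tagged union, so exactly one data-type member is set per
// value. Plain object literals still satisfy the type, and simple property checks can
// narrow it without the Visitor helper. The item values below are hypothetical.
import { AttributeValue } from "@aws-sdk/client-dynamodb";

const song: Record<string, AttributeValue> = {
  Artist: { S: "Acme Band" },
  Plays: { N: "42" },
  Genres: { SS: ["rock", "indie"] },
};

function describeAttribute(value: AttributeValue): string {
  if (value.S !== undefined) return `string: ${value.S}`;
  if (value.N !== undefined) return `number: ${value.N}`;
  if (value.SS !== undefined) return `string set of ${value.SS.length}`;
  return "other attribute type";
}

console.log(describeAttribute(song.Artist)); // "string: Acme Band"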
+
+ export interface Visitor+ * A PartiQL batch statement request. + *
+ */ +export interface BatchStatementRequest { + /** + *+ * A valid PartiQL statement. + *
+ */ + Statement: string | undefined; + + /** + *+ * The parameters associated with a PartiQL statement in the batch request. + *
+ */ + Parameters?: AttributeValue[]; + + /** + *+ * The read consistency of the PartiQL batch request. + *
+ */ + ConsistentRead?: boolean; +} + +export namespace BatchStatementRequest { + export const filterSensitiveLog = (obj: BatchStatementRequest): any => ({ + ...obj, + ...(obj.Parameters && { Parameters: obj.Parameters.map((item) => AttributeValue.filterSensitiveLog(item)) }), + }); +} + +/** + *+ * A PartiQL batch statement response.. + *
+ */ +export interface BatchStatementResponse { + /** + *+ * The error associated with a failed PartiQL batch statement. + *
+ */ + Error?: BatchStatementError; + + /** + *+ * The table name associated with a failed PartiQL batch statement. + *
+ */ + TableName?: string; + + /** + *+ * A DynamoDB item associated with a BatchStatementResponse + *
+ */ + Item?: { [key: string]: AttributeValue }; +} + +export namespace BatchStatementResponse { + export const filterSensitiveLog = (obj: BatchStatementResponse): any => ({ + ...obj, + ...(obj.Item && { + Item: Object.entries(obj.Item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -5600,6 +6029,15 @@ export interface CancellationReason { export namespace CancellationReason { export const filterSensitiveLog = (obj: CancellationReason): any => ({ ...obj, + ...(obj.Item && { + Item: Object.entries(obj.Item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -5791,6 +6229,9 @@ export interface Condition { export namespace Condition { export const filterSensitiveLog = (obj: Condition): any => ({ ...obj, + ...(obj.AttributeValueList && { + AttributeValueList: obj.AttributeValueList.map((item) => AttributeValue.filterSensitiveLog(item)), + }), }); } @@ -5807,6 +6248,52 @@ export interface DeleteRequest { export namespace DeleteRequest { export const filterSensitiveLog = (obj: DeleteRequest): any => ({ ...obj, + ...(obj.Key && { + Key: Object.entries(obj.Key).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + }); +} + +export interface ExecuteStatementInput { + /** + *+ * The PartiQL statement representing the operation to run. + *
+ */ + Statement: string | undefined; + + /** + *+ * The parameters for the PartiQL statement, if any. + *
+ */ + Parameters?: AttributeValue[]; + + /** + *
+ * The consistency of a read operation. If set to true
, then a strongly consistent read is used; otherwise, an eventually consistent read is used.
+ *
+ * Set this value to get remaining results, if NextToken
was returned in the statement response.
+ *
+ * Represents a PartiQL statment that uses parameters. + *
+ */ +export interface ParameterizedStatement { + /** + *+ * A PartiQL statment that uses parameters. + *
+ */ + Statement: string | undefined; + + /** + *+ * The parameter values. + *
+ */ + Parameters?: AttributeValue[]; +} + +export namespace ParameterizedStatement { + export const filterSensitiveLog = (obj: ParameterizedStatement): any => ({ + ...obj, + ...(obj.Parameters && { Parameters: obj.Parameters.map((item) => AttributeValue.filterSensitiveLog(item)) }), }); } @@ -6040,6 +6600,48 @@ export interface PutRequest { export namespace PutRequest { export const filterSensitiveLog = (obj: PutRequest): any => ({ ...obj, + ...(obj.Item && { + Item: Object.entries(obj.Item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + }); +} + +export interface ExecuteStatementOutput { + /** + *+ * If a read operation was used, this property will contain the result of the reade operation; a map of attribute names and their values. For the write operations this value will be empty. + *
+ */ + Items?: { [key: string]: AttributeValue }[]; + + /** + *+ * If the response of a read request exceeds the response payload limit DynamoDB will set this value in the response. If set, you can use that this value in the subsequent request to get the remaining results. + *
+ */ + NextToken?: string; +} + +export namespace ExecuteStatementOutput { + export const filterSensitiveLog = (obj: ExecuteStatementOutput): any => ({ + ...obj, + ...(obj.Items && { + Items: obj.Items.map((item) => + Object.entries(item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ) + ), + }), }); } @@ -6124,6 +6726,17 @@ export interface KeysAndAttributes { export namespace KeysAndAttributes { export const filterSensitiveLog = (obj: KeysAndAttributes): any => ({ ...obj, + ...(obj.Keys && { + Keys: obj.Keys.map((item) => + Object.entries(item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ) + ), + }), }); } @@ -6142,6 +6755,76 @@ export interface TransactGetItem { export namespace TransactGetItem { export const filterSensitiveLog = (obj: TransactGetItem): any => ({ ...obj, + ...(obj.Get && { Get: Get.filterSensitiveLog(obj.Get) }), + }); +} + +export interface BatchExecuteStatementInput { + /** + *+ * The list of PartiQL statements representing the batch to run. + *
+ */ + Statements: BatchStatementRequest[] | undefined; +} + +export namespace BatchExecuteStatementInput { + export const filterSensitiveLog = (obj: BatchExecuteStatementInput): any => ({ + ...obj, + }); +} + +export interface BatchExecuteStatementOutput { + /** + *+ * The response to each PartiQL statement in the batch. + *
+ */ + Responses?: BatchStatementResponse[]; +} + +export namespace BatchExecuteStatementOutput { + export const filterSensitiveLog = (obj: BatchExecuteStatementOutput): any => ({ + ...obj, + ...(obj.Responses && { Responses: obj.Responses.map((item) => BatchStatementResponse.filterSensitiveLog(item)) }), + }); +} + +export interface ExecuteTransactionInput { + /** + *+ * The list of PartiQL statements representing the transaction to run. + *
+ */ + TransactStatements: ParameterizedStatement[] | undefined; + + /** + *
+ * Set this value to get remaining results, if NextToken
was returned in the statement response.
+ *
+ * The response to a PartiQL transaction. + *
+ */ + Responses?: ItemResponse[]; +} + +export namespace ExecuteTransactionOutput { + export const filterSensitiveLog = (obj: ExecuteTransactionOutput): any => ({ + ...obj, + ...(obj.Responses && { Responses: obj.Responses.map((item) => ItemResponse.filterSensitiveLog(item)) }), }); } @@ -6171,6 +6854,7 @@ export interface TransactGetItemsOutput { export namespace TransactGetItemsOutput { export const filterSensitiveLog = (obj: TransactGetItemsOutput): any => ({ ...obj, + ...(obj.Responses && { Responses: obj.Responses.map((item) => ItemResponse.filterSensitiveLog(item)) }), }); } @@ -6392,6 +7076,9 @@ export interface TransactionCanceledException extends __SmithyException, $Metada export namespace TransactionCanceledException { export const filterSensitiveLog = (obj: TransactionCanceledException): any => ({ ...obj, + ...(obj.CancellationReasons && { + CancellationReasons: obj.CancellationReasons.map((item) => CancellationReason.filterSensitiveLog(item)), + }), }); } @@ -6745,6 +7432,10 @@ export interface ExpectedAttributeValue { export namespace ExpectedAttributeValue { export const filterSensitiveLog = (obj: ExpectedAttributeValue): any => ({ ...obj, + ...(obj.Value && { Value: AttributeValue.filterSensitiveLog(obj.Value) }), + ...(obj.AttributeValueList && { + AttributeValueList: obj.AttributeValueList.map((item) => AttributeValue.filterSensitiveLog(item)), + }), }); } @@ -6766,6 +7457,9 @@ export interface TransactGetItemsInput { export namespace TransactGetItemsInput { export const filterSensitiveLog = (obj: TransactGetItemsInput): any => ({ ...obj, + ...(obj.TransactItems && { + TransactItems: obj.TransactItems.map((item) => TransactGetItem.filterSensitiveLog(item)), + }), }); } @@ -6836,6 +7530,24 @@ export interface ConditionCheck { export namespace ConditionCheck { export const filterSensitiveLog = (obj: ConditionCheck): any => ({ ...obj, + ...(obj.Key && { + Key: Object.entries(obj.Key).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -6881,6 +7593,24 @@ export interface Delete { export namespace Delete { export const filterSensitiveLog = (obj: Delete): any => ({ ...obj, + ...(obj.Key && { + Key: Object.entries(obj.Key).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -6929,6 +7659,24 @@ export interface Put { export namespace Put { export const filterSensitiveLog = (obj: Put): any => ({ ...obj, + ...(obj.Item && { + Item: Object.entries(obj.Item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), 
+ {} + ), + }), }); } @@ -6981,6 +7729,24 @@ export interface Update { export namespace Update { export const filterSensitiveLog = (obj: Update): any => ({ ...obj, + ...(obj.Key && { + Key: Object.entries(obj.Key).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -7039,6 +7805,18 @@ export interface DeleteItemOutput { export namespace DeleteItemOutput { export const filterSensitiveLog = (obj: DeleteItemOutput): any => ({ ...obj, + ...(obj.Attributes && { + Attributes: Object.entries(obj.Attributes).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ItemCollectionMetrics && { + ItemCollectionMetrics: ItemCollectionMetrics.filterSensitiveLog(obj.ItemCollectionMetrics), + }), }); } @@ -7095,6 +7873,18 @@ export interface PutItemOutput { export namespace PutItemOutput { export const filterSensitiveLog = (obj: PutItemOutput): any => ({ ...obj, + ...(obj.Attributes && { + Attributes: Object.entries(obj.Attributes).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ItemCollectionMetrics && { + ItemCollectionMetrics: ItemCollectionMetrics.filterSensitiveLog(obj.ItemCollectionMetrics), + }), }); } @@ -7148,6 +7938,26 @@ export interface QueryOutput { export namespace QueryOutput { export const filterSensitiveLog = (obj: QueryOutput): any => ({ ...obj, + ...(obj.Items && { + Items: obj.Items.map((item) => + Object.entries(item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ) + ), + }), + ...(obj.LastEvaluatedKey && { + LastEvaluatedKey: Object.entries(obj.LastEvaluatedKey).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -7206,6 +8016,26 @@ export interface ScanOutput { export namespace ScanOutput { export const filterSensitiveLog = (obj: ScanOutput): any => ({ ...obj, + ...(obj.Items && { + Items: obj.Items.map((item) => + Object.entries(item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ) + ), + }), + ...(obj.LastEvaluatedKey && { + LastEvaluatedKey: Object.entries(obj.LastEvaluatedKey).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -7264,6 +8094,18 @@ export interface UpdateItemOutput { export namespace UpdateItemOutput { export const filterSensitiveLog = (obj: UpdateItemOutput): any => ({ ...obj, + ...(obj.Attributes && { + Attributes: Object.entries(obj.Attributes).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ItemCollectionMetrics && { + ItemCollectionMetrics: ItemCollectionMetrics.filterSensitiveLog(obj.ItemCollectionMetrics), + }), }); } @@ -7288,6 +8130,8 @@ export interface WriteRequest { 
export namespace WriteRequest { export const filterSensitiveLog = (obj: WriteRequest): any => ({ ...obj, + ...(obj.PutRequest && { PutRequest: PutRequest.filterSensitiveLog(obj.PutRequest) }), + ...(obj.DeleteRequest && { DeleteRequest: DeleteRequest.filterSensitiveLog(obj.DeleteRequest) }), }); } @@ -7626,6 +8470,33 @@ export interface ScanInput { export namespace ScanInput { export const filterSensitiveLog = (obj: ScanInput): any => ({ ...obj, + ...(obj.ScanFilter && { + ScanFilter: Object.entries(obj.ScanFilter).reduce( + (acc: any, [key, value]: [string, Condition]) => ({ + ...acc, + [key]: Condition.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExclusiveStartKey && { + ExclusiveStartKey: Object.entries(obj.ExclusiveStartKey).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -7878,6 +8749,33 @@ export interface DeleteItemInput { export namespace DeleteItemInput { export const filterSensitiveLog = (obj: DeleteItemInput): any => ({ ...obj, + ...(obj.Key && { + Key: Object.entries(obj.Key).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.Expected && { + Expected: Object.entries(obj.Expected).reduce( + (acc: any, [key, value]: [string, ExpectedAttributeValue]) => ({ + ...acc, + [key]: ExpectedAttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -8058,6 +8956,33 @@ export interface PutItemInput { export namespace PutItemInput { export const filterSensitiveLog = (obj: PutItemInput): any => ({ ...obj, + ...(obj.Item && { + Item: Object.entries(obj.Item).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.Expected && { + Expected: Object.entries(obj.Expected).reduce( + (acc: any, [key, value]: [string, ExpectedAttributeValue]) => ({ + ...acc, + [key]: ExpectedAttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -8424,6 +9349,42 @@ export interface QueryInput { export namespace QueryInput { export const filterSensitiveLog = (obj: QueryInput): any => ({ ...obj, + ...(obj.KeyConditions && { + KeyConditions: Object.entries(obj.KeyConditions).reduce( + (acc: any, [key, value]: [string, Condition]) => ({ + ...acc, + [key]: Condition.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.QueryFilter && { + QueryFilter: Object.entries(obj.QueryFilter).reduce( + (acc: any, [key, value]: [string, Condition]) => ({ + ...acc, + [key]: Condition.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExclusiveStartKey && { + ExclusiveStartKey: 
Object.entries(obj.ExclusiveStartKey).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -8805,6 +9766,42 @@ export interface UpdateItemInput { export namespace UpdateItemInput { export const filterSensitiveLog = (obj: UpdateItemInput): any => ({ ...obj, + ...(obj.Key && { + Key: Object.entries(obj.Key).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.AttributeUpdates && { + AttributeUpdates: Object.entries(obj.AttributeUpdates).reduce( + (acc: any, [key, value]: [string, AttributeValueUpdate]) => ({ + ...acc, + [key]: AttributeValueUpdate.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.Expected && { + Expected: Object.entries(obj.Expected).reduce( + (acc: any, [key, value]: [string, ExpectedAttributeValue]) => ({ + ...acc, + [key]: ExpectedAttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), + ...(obj.ExpressionAttributeValues && { + ExpressionAttributeValues: Object.entries(obj.ExpressionAttributeValues).reduce( + (acc: any, [key, value]: [string, AttributeValue]) => ({ + ...acc, + [key]: AttributeValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } @@ -8836,6 +9833,10 @@ export interface TransactWriteItem { export namespace TransactWriteItem { export const filterSensitiveLog = (obj: TransactWriteItem): any => ({ ...obj, + ...(obj.ConditionCheck && { ConditionCheck: ConditionCheck.filterSensitiveLog(obj.ConditionCheck) }), + ...(obj.Put && { Put: Put.filterSensitiveLog(obj.Put) }), + ...(obj.Delete && { Delete: Delete.filterSensitiveLog(obj.Delete) }), + ...(obj.Update && { Update: Update.filterSensitiveLog(obj.Update) }), }); } @@ -8902,5 +9903,8 @@ export interface TransactWriteItemsInput { export namespace TransactWriteItemsInput { export const filterSensitiveLog = (obj: TransactWriteItemsInput): any => ({ ...obj, + ...(obj.TransactItems && { + TransactItems: obj.TransactItems.map((item) => TransactWriteItem.filterSensitiveLog(item)), + }), }); } diff --git a/clients/client-dynamodb/protocols/Aws_json1_0.ts b/clients/client-dynamodb/protocols/Aws_json1_0.ts index afc8998d0fe5..67053470b1f6 100644 --- a/clients/client-dynamodb/protocols/Aws_json1_0.ts +++ b/clients/client-dynamodb/protocols/Aws_json1_0.ts @@ -1,3 +1,7 @@ +import { + BatchExecuteStatementCommandInput, + BatchExecuteStatementCommandOutput, +} from "../commands/BatchExecuteStatementCommand"; import { BatchGetItemCommandInput, BatchGetItemCommandOutput } from "../commands/BatchGetItemCommand"; import { BatchWriteItemCommandInput, BatchWriteItemCommandOutput } from "../commands/BatchWriteItemCommand"; import { CreateBackupCommandInput, CreateBackupCommandOutput } from "../commands/CreateBackupCommand"; @@ -25,6 +29,10 @@ import { DescribeGlobalTableSettingsCommandInput, DescribeGlobalTableSettingsCommandOutput, } from "../commands/DescribeGlobalTableSettingsCommand"; +import { + DescribeKinesisStreamingDestinationCommandInput, + DescribeKinesisStreamingDestinationCommandOutput, +} from "../commands/DescribeKinesisStreamingDestinationCommand"; import { DescribeLimitsCommandInput, DescribeLimitsCommandOutput } from 
"../commands/DescribeLimitsCommand"; import { DescribeTableCommandInput, DescribeTableCommandOutput } from "../commands/DescribeTableCommand"; import { @@ -32,6 +40,16 @@ import { DescribeTableReplicaAutoScalingCommandOutput, } from "../commands/DescribeTableReplicaAutoScalingCommand"; import { DescribeTimeToLiveCommandInput, DescribeTimeToLiveCommandOutput } from "../commands/DescribeTimeToLiveCommand"; +import { + DisableKinesisStreamingDestinationCommandInput, + DisableKinesisStreamingDestinationCommandOutput, +} from "../commands/DisableKinesisStreamingDestinationCommand"; +import { + EnableKinesisStreamingDestinationCommandInput, + EnableKinesisStreamingDestinationCommandOutput, +} from "../commands/EnableKinesisStreamingDestinationCommand"; +import { ExecuteStatementCommandInput, ExecuteStatementCommandOutput } from "../commands/ExecuteStatementCommand"; +import { ExecuteTransactionCommandInput, ExecuteTransactionCommandOutput } from "../commands/ExecuteTransactionCommand"; import { ExportTableToPointInTimeCommandInput, ExportTableToPointInTimeCommandOutput, @@ -97,8 +115,13 @@ import { BackupInUseException, BackupNotFoundException, BackupSummary, + BatchExecuteStatementInput, + BatchExecuteStatementOutput, BatchGetItemInput, BatchGetItemOutput, + BatchStatementError, + BatchStatementRequest, + BatchStatementResponse, BatchWriteItemInput, BatchWriteItemOutput, BillingModeSummary, @@ -145,6 +168,8 @@ import { DescribeGlobalTableOutput, DescribeGlobalTableSettingsInput, DescribeGlobalTableSettingsOutput, + DescribeKinesisStreamingDestinationInput, + DescribeKinesisStreamingDestinationOutput, DescribeLimitsInput, DescribeLimitsOutput, DescribeTableInput, @@ -153,7 +178,12 @@ import { DescribeTableReplicaAutoScalingOutput, DescribeTimeToLiveInput, DescribeTimeToLiveOutput, + DuplicateItemException, Endpoint, + ExecuteStatementInput, + ExecuteStatementOutput, + ExecuteTransactionInput, + ExecuteTransactionOutput, ExpectedAttributeValue, ExportConflictException, ExportDescription, @@ -186,6 +216,9 @@ import { ItemResponse, KeySchemaElement, KeysAndAttributes, + KinesisDataStreamDestination, + KinesisStreamingDestinationInput, + KinesisStreamingDestinationOutput, LimitExceededException, ListBackupsInput, ListBackupsOutput, @@ -202,6 +235,7 @@ import { LocalSecondaryIndex, LocalSecondaryIndexDescription, LocalSecondaryIndexInfo, + ParameterizedStatement, PointInTimeRecoveryDescription, PointInTimeRecoverySpecification, PointInTimeRecoveryUnavailableException, @@ -298,6 +332,19 @@ import { } from "@aws-sdk/types"; import { v4 as generateIdempotencyToken } from "uuid"; +export const serializeAws_json1_0BatchExecuteStatementCommand = async ( + input: BatchExecuteStatementCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "Content-Type": "application/x-amz-json-1.0", + "X-Amz-Target": "DynamoDB_20120810.BatchExecuteStatement", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0BatchExecuteStatementInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_0BatchGetItemCommand = async ( input: BatchGetItemCommandInput, context: __SerdeContext @@ -493,6 +540,19 @@ export const serializeAws_json1_0DescribeGlobalTableSettingsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_0DescribeKinesisStreamingDestinationCommand = async ( + input: DescribeKinesisStreamingDestinationCommandInput, + 
context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "Content-Type": "application/x-amz-json-1.0", + "X-Amz-Target": "DynamoDB_20120810.DescribeKinesisStreamingDestination", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0DescribeKinesisStreamingDestinationInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_0DescribeLimitsCommand = async ( input: DescribeLimitsCommandInput, context: __SerdeContext @@ -545,6 +605,58 @@ export const serializeAws_json1_0DescribeTimeToLiveCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_0DisableKinesisStreamingDestinationCommand = async ( + input: DisableKinesisStreamingDestinationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "Content-Type": "application/x-amz-json-1.0", + "X-Amz-Target": "DynamoDB_20120810.DisableKinesisStreamingDestination", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0KinesisStreamingDestinationInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0EnableKinesisStreamingDestinationCommand = async ( + input: EnableKinesisStreamingDestinationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "Content-Type": "application/x-amz-json-1.0", + "X-Amz-Target": "DynamoDB_20120810.EnableKinesisStreamingDestination", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0KinesisStreamingDestinationInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ExecuteStatementCommand = async ( + input: ExecuteStatementCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "Content-Type": "application/x-amz-json-1.0", + "X-Amz-Target": "DynamoDB_20120810.ExecuteStatement", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ExecuteStatementInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ExecuteTransactionCommand = async ( + input: ExecuteTransactionCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "Content-Type": "application/x-amz-json-1.0", + "X-Amz-Target": "DynamoDB_20120810.ExecuteTransaction", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ExecuteTransactionInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_0ExportTableToPointInTimeCommand = async ( input: ExportTableToPointInTimeCommandInput, context: __SerdeContext @@ -870,6 +982,69 @@ export const serializeAws_json1_0UpdateTimeToLiveCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const deserializeAws_json1_0BatchExecuteStatementCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseInformation about the Private DNS name for interface endpoints.
+ */ +export interface PrivateDnsDetails { + /** + *The private DNS name assigned to the VPC endpoint service.
+ */ + PrivateDnsName?: string; +} + +export namespace PrivateDnsDetails { + export const filterSensitiveLog = (obj: PrivateDnsDetails): any => ({ + ...obj, + }); +} + /** *Describes a VPC endpoint service.
*/ @@ -8148,6 +8164,11 @@ export interface ServiceDetail { */ PrivateDnsName?: string; + /** + *The private DNS names assigned to the VPC endpoint service.
+ */ + PrivateDnsNames?: PrivateDnsDetails[]; + /** *Indicates whether the service supports endpoint policies.
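// Illustrative sketch (not part of the generated client): reading the PrivateDnsNames
// list that this change adds to ServiceDetail. DescribeVpcEndpointServicesCommand is an
// existing client-ec2 operation; the region is hypothetical and no filters are applied.
import { EC2Client, DescribeVpcEndpointServicesCommand } from "@aws-sdk/client-ec2";

async function listPrivateDnsNames(): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" });
  const output = await client.send(new DescribeVpcEndpointServicesCommand({}));

  for (const service of output.ServiceDetails ?? []) {
    const names = (service.PrivateDnsNames ?? []).map((entry) => entry.PrivateDnsName);
    console.log(service.ServiceName, names);
  }
}

void listPrivateDnsNames();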
*/ @@ -10692,18 +10713,3 @@ export namespace GetEbsDefaultKmsKeyIdResult { ...obj, }); } - -export interface GetEbsEncryptionByDefaultRequest { - /** - *Checks whether you have the required permissions for the action, without actually making the request,
- * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
- * Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request,
+ * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
+ * Otherwise, it is UnauthorizedOperation
.
Indicates whether encryption by default is enabled.
diff --git a/clients/client-ec2/pagination/DescribeByoipCidrsPaginator.ts b/clients/client-ec2/pagination/DescribeByoipCidrsPaginator.ts new file mode 100644 index 000000000000..3549bdd07b15 --- /dev/null +++ b/clients/client-ec2/pagination/DescribeByoipCidrsPaginator.ts @@ -0,0 +1,57 @@ +import { EC2 } from "../EC2"; +import { EC2Client } from "../EC2Client"; +import { + DescribeByoipCidrsCommand, + DescribeByoipCidrsCommandInput, + DescribeByoipCidrsCommandOutput, +} from "../commands/DescribeByoipCidrsCommand"; +import { EC2PaginationConfiguration } from "./Interfaces"; +import { Paginator } from "@aws-sdk/types"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: EC2Client, + input: DescribeByoipCidrsCommandInput, + ...args: any +): PromiseModifies the parameters for a capacity provider.
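// A usage sketch for the DescribeByoipCidrs paginator added in the EC2 diff above, assuming it
// follows the usual SDK v3 pattern and exports a paginateDescribeByoipCidrs helper; the region
// and MaxResults value are placeholders.
import { ByoipCidr, EC2Client, paginateDescribeByoipCidrs } from "@aws-sdk/client-ec2";

const byoipClient = new EC2Client({ region: "us-east-1" });

export const listAllByoipCidrs = async () => {
  const cidrs: ByoipCidr[] = [];
  // Each iteration issues one DescribeByoipCidrs call; the paginator threads NextToken through.
  for await (const page of paginateDescribeByoipCidrs({ client: byoipClient }, { MaxResults: 10 })) {
    cidrs.push(...(page.ByoipCidrs ?? []));
  }
  return cidrs;
};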
+ */ + public updateCapacityProvider( + args: UpdateCapacityProviderCommandInput, + options?: __HttpHandlerOptions + ): PromiseModifies the settings to use for a cluster.
*/ diff --git a/clients/client-ecs/ECSClient.ts b/clients/client-ecs/ECSClient.ts index 375864a28933..ebd58433e908 100644 --- a/clients/client-ecs/ECSClient.ts +++ b/clients/client-ecs/ECSClient.ts @@ -104,6 +104,10 @@ import { } from "./commands/SubmitTaskStateChangeCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { + UpdateCapacityProviderCommandInput, + UpdateCapacityProviderCommandOutput, +} from "./commands/UpdateCapacityProviderCommand"; import { UpdateClusterSettingsCommandInput, UpdateClusterSettingsCommandOutput, @@ -215,6 +219,7 @@ export type ServiceInputTypes = | SubmitTaskStateChangeCommandInput | TagResourceCommandInput | UntagResourceCommandInput + | UpdateCapacityProviderCommandInput | UpdateClusterSettingsCommandInput | UpdateContainerAgentCommandInput | UpdateContainerInstancesStateCommandInput @@ -266,6 +271,7 @@ export type ServiceOutputTypes = | SubmitTaskStateChangeCommandOutput | TagResourceCommandOutput | UntagResourceCommandOutput + | UpdateCapacityProviderCommandOutput | UpdateClusterSettingsCommandOutput | UpdateContainerAgentCommandOutput | UpdateContainerInstancesStateCommandOutput diff --git a/clients/client-ecs/commands/UpdateCapacityProviderCommand.ts b/clients/client-ecs/commands/UpdateCapacityProviderCommand.ts new file mode 100644 index 000000000000..f42429fb10b2 --- /dev/null +++ b/clients/client-ecs/commands/UpdateCapacityProviderCommand.ts @@ -0,0 +1,88 @@ +import { ECSClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ECSClient"; +import { UpdateCapacityProviderRequest, UpdateCapacityProviderResponse } from "../models/models_0"; +import { + deserializeAws_json1_1UpdateCapacityProviderCommand, + serializeAws_json1_1UpdateCapacityProviderCommand, +} from "../protocols/Aws_json1_1"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export type UpdateCapacityProviderCommandInput = UpdateCapacityProviderRequest; +export type UpdateCapacityProviderCommandOutput = UpdateCapacityProviderResponse & __MetadataBearer; + +/** + *Modifies the parameters for a capacity provider.
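// A minimal sketch of calling the new UpdateCapacityProviderCommand registered above. It
// assumes the request accepts the provider name plus the Auto Scaling group settings to change
// (the exact request/response shapes live in models_0.ts); all values are placeholders.
import { ECSClient, UpdateCapacityProviderCommand } from "@aws-sdk/client-ecs";

const ecsClient = new ECSClient({ region: "us-east-1" });

export const raiseTargetCapacity = async () => {
  return ecsClient.send(
    new UpdateCapacityProviderCommand({
      name: "my-capacity-provider",
      autoScalingGroupProvider: {
        managedScaling: { status: "ENABLED", targetCapacity: 90 },
      },
    })
  );
};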
+ */ +export class UpdateCapacityProviderCommand extends $Command< + UpdateCapacityProviderCommandInput, + UpdateCapacityProviderCommandOutput, + ECSClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateCapacityProviderCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale
- * in process is not affected by this parameter If this parameter is omitted, the default
- * value of 1
is used.
When additional capacity is required, Amazon ECS will scale up the minimum scaling step - * size even if the actual demand is less than the minimum scaling step size.
- *If you use a capacity provider with an Auto Scaling group configured with more than - * one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum - * scaling step size value and will ignore both the maximum scaling step size as well as - * the capacity demand.
- */ - minimumScalingStepSize?: number; - - /** - *The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale
- * in process is not affected by this parameter. If this parameter is omitted, the default
- * value of 10000
is used.
Whether or not to enable managed scaling for the capacity provider.
*/ - maximumScalingStepSize?: number; + status?: ManagedScalingStatus | string; /** *The target capacity value for the capacity provider. The specified value must be @@ -86,9 +71,24 @@ export interface ManagedScaling { targetCapacity?: number; /** - *
Whether or not to enable managed scaling for the capacity provider.
+ *The minimum number of container instances that Amazon ECS will scale in or scale out at one
+ * time. If this parameter is omitted, the default value of 1
is used.
The maximum number of container instances that Amazon ECS will scale in or scale out at one
+ * time. If this parameter is omitted, the default value of 10000
is
+ * used.
The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute
+ * to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value
+ * of 300
seconds is used.
The details of the Auto Scaling group for the capacity provider.
*/ export interface AutoScalingGroupProvider { + /** + *The Amazon Resource Name (ARN) that identifies the Auto Scaling group.
+ */ + autoScalingGroupArn: string | undefined; + + /** + *The managed scaling settings for the Auto Scaling group capacity provider.
+ */ + managedScaling?: ManagedScaling; + /** *The managed termination protection setting to use for the Auto Scaling group capacity * provider. This determines whether the Auto Scaling group has managed termination @@ -122,16 +132,6 @@ export interface AutoScalingGroupProvider { * protected from termination when the Auto Scaling group scales in.
*/ managedTerminationProtection?: ManagedTerminationProtection | string; - - /** - *The managed scaling settings for the Auto Scaling group capacity provider.
- */ - managedScaling?: ManagedScaling; - - /** - *The Amazon Resource Name (ARN) that identifies the Auto Scaling group.
- */ - autoScalingGroupArn: string | undefined; } export namespace AutoScalingGroupProvider { @@ -176,17 +176,17 @@ export namespace AutoScalingGroupProvider { * */ export interface Tag { - /** - *The optional part of a key-value pair that make up a tag. A value
acts as
- * a descriptor within a tag category (key).
One part of a key-value pair that make up a tag. A key
is a general label
* that acts like a category for more specific tag values.
The optional part of a key-value pair that make up a tag. A value
acts as
+ * a descriptor within a tag category (key).
The details of the Auto Scaling group for the capacity provider.
- */ - autoScalingGroupProvider: AutoScalingGroupProvider | undefined; - /** *The name of the capacity provider. Up to 255 characters are allowed, including letters * (upper and lowercase), numbers, underscores, and hyphens. The name cannot be prefixed @@ -208,6 +203,11 @@ export interface CreateCapacityProviderRequest { */ name: string | undefined; + /** + *
The details of the Auto Scaling group for the capacity provider.
+ */ + autoScalingGroupProvider: AutoScalingGroupProvider | undefined; + /** *The metadata that you apply to the capacity provider to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which you @@ -262,12 +262,59 @@ export enum CapacityProviderUpdateStatus { DELETE_COMPLETE = "DELETE_COMPLETE", DELETE_FAILED = "DELETE_FAILED", DELETE_IN_PROGRESS = "DELETE_IN_PROGRESS", + UPDATE_COMPLETE = "UPDATE_COMPLETE", + UPDATE_FAILED = "UPDATE_FAILED", + UPDATE_IN_PROGRESS = "UPDATE_IN_PROGRESS", } /** *
The details of a capacity provider.
*/ export interface CapacityProvider { + /** + *The Amazon Resource Name (ARN) that identifies the capacity provider.
+ */ + capacityProviderArn?: string; + + /** + *The name of the capacity provider.
+ */ + name?: string; + + /** + *The current status of the capacity provider. Only capacity providers in an
+ * ACTIVE
state can be used in a cluster. When a capacity provider is
+ * successfully deleted, it will have an INACTIVE
status.
The Auto Scaling group settings for the capacity provider.
+ */ + autoScalingGroupProvider?: AutoScalingGroupProvider; + + /** + *The update status of the capacity provider. The following are the possible states that + * will be returned.
+ *The capacity provider is in the process of being deleted.
+ *The capacity provider has been successfully deleted and will have an
+ * INACTIVE
status.
The capacity provider was unable to be deleted. The update status reason + * will provide further details about why the delete failed.
+ *The update status reason. This provides further details about the update status for * the capacity provider.
@@ -311,50 +358,6 @@ export interface CapacityProvider { * */ tags?: Tag[]; - - /** - *The current status of the capacity provider. Only capacity providers in an
- * ACTIVE
state can be used in a cluster. When a capacity provider is
- * successfully deleted, it will have an INACTIVE
status.
The Amazon Resource Name (ARN) that identifies the capacity provider.
- */ - capacityProviderArn?: string; - - /** - *The Auto Scaling group settings for the capacity provider.
- */ - autoScalingGroupProvider?: AutoScalingGroupProvider; - - /** - *The name of the capacity provider.
- */ - name?: string; - - /** - *The update status of the capacity provider. The following are the possible states that - * will be returned.
- *The capacity provider is in the process of being deleted.
- *The capacity provider has been successfully deleted and will have an
- * INACTIVE
status.
The capacity provider was unable to be deleted. The update status reason - * will provide further details about why the delete failed.
- *The details of a capacity provider strategy.
*/ export interface CapacityProviderStrategyItem { + /** + *The short name of the capacity provider.
+ */ + capacityProvider: string | undefined; + /** *The weight value designates the relative percentage of the total * number of tasks launched that should use the specified capacity provider.
@@ -458,11 +466,6 @@ export interface CapacityProviderStrategyItem { */ weight?: number; - /** - *The short name of the capacity provider.
- */ - capacityProvider: string | undefined; - /** *The base value designates how many tasks, at a minimum, to run on * the specified capacity provider. Only one capacity provider in a capacity provider @@ -553,29 +556,6 @@ export interface CreateClusterRequest { */ tags?: Tag[]; - /** - *
The capacity provider strategy to use by default for the cluster.
- *When creating a service or running a task on a cluster, if no capacity provider or - * launch type is specified then the default capacity provider strategy for the cluster is - * used.
- *A capacity provider strategy consists of one or more capacity providers along with the
- * base
and weight
to assign to them. A capacity provider
- * must be associated with the cluster to be used in a capacity provider strategy. The
- * PutClusterCapacityProviders API is used to associate a capacity
- * provider with a cluster. Only capacity providers with an ACTIVE
or
- * UPDATING
status can be used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity - * provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
- *To use a AWS Fargate capacity provider, specify either the FARGATE
or
- * FARGATE_SPOT
capacity providers. The AWS Fargate capacity providers are
- * available to all accounts and only need to be associated with a cluster to be
- * used.
If a default capacity provider strategy is not defined for a cluster during creation, - * it can be defined later with the PutClusterCapacityProviders API - * operation.
- */ - defaultCapacityProviderStrategy?: CapacityProviderStrategyItem[]; - /** *The setting to use when creating a cluster. This parameter is used to enable CloudWatch * Container Insights for a cluster. If this value is specified, it will override the @@ -599,6 +579,29 @@ export interface CreateClusterRequest { * list of available capacity providers for a cluster after the cluster is created.
*/ capacityProviders?: string[]; + + /** + *The capacity provider strategy to use by default for the cluster.
+ *When creating a service or running a task on a cluster, if no capacity provider or + * launch type is specified then the default capacity provider strategy for the cluster is + * used.
+ *A capacity provider strategy consists of one or more capacity providers along with the
+ * base
and weight
to assign to them. A capacity provider
+ * must be associated with the cluster to be used in a capacity provider strategy. The
+ * PutClusterCapacityProviders API is used to associate a capacity
+ * provider with a cluster. Only capacity providers with an ACTIVE
or
+ * UPDATING
status can be used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity + * provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
+ *To use a AWS Fargate capacity provider, specify either the FARGATE
or
+ * FARGATE_SPOT
capacity providers. The AWS Fargate capacity providers are
+ * available to all accounts and only need to be associated with a cluster to be
+ * used.
If a default capacity provider strategy is not defined for a cluster during creation, + * it can be defined later with the PutClusterCapacityProviders API + * operation.
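// A sketch of the default capacity provider strategy described above, using the AWS Fargate
// capacity providers with base and weight values; the cluster name and weights are
// placeholders, and the existing CreateClusterCommand from this client is assumed.
import { CreateClusterCommand, ECSClient } from "@aws-sdk/client-ecs";

const clusterClient = new ECSClient({ region: "us-east-1" });

export const createClusterWithDefaults = async () => {
  return clusterClient.send(
    new CreateClusterCommand({
      clusterName: "demo",
      capacityProviders: ["FARGATE", "FARGATE_SPOT"],
      defaultCapacityProviderStrategy: [
        // base: always keep at least one task on regular Fargate; weight: then split 1:4 with Spot.
        { capacityProvider: "FARGATE", base: 1, weight: 1 },
        { capacityProvider: "FARGATE_SPOT", weight: 4 },
      ],
    })
  );
};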
+ */ + defaultCapacityProviderStrategy?: CapacityProviderStrategyItem[]; } export namespace CreateClusterRequest { @@ -612,16 +615,16 @@ export namespace CreateClusterRequest { */ export interface KeyValuePair { /** - *The value of the key-value pair. For environment variables, this is the value of the + *
The name of the key-value pair. For environment variables, this is the name of the * environment variable.
*/ - value?: string; + name?: string; /** - *The name of the key-value pair. For environment variables, this is the name of the + *
The value of the key-value pair. For environment variables, this is the value of the * environment variable.
*/ - name?: string; + value?: string; } export namespace KeyValuePair { @@ -635,11 +638,9 @@ export namespace KeyValuePair { */ export interface Attachment { /** - * The status of the attachment. Valid values are PRECREATED
,
- * CREATED
, ATTACHING
, ATTACHED
,
- * DETACHING
, DETACHED
, and DELETED
.
The unique identifier for the attachment.
*/ - status?: string; + id?: string; /** *The type of the attachment, such as ElasticNetworkInterface
.
The unique identifier for the attachment.
+ * The status of the attachment. Valid values are PRECREATED
,
+ * CREATED
, ATTACHING
, ATTACHED
,
+ * DETACHING
, DETACHED
, and DELETED
.
Details of the attachment. For elastic network interfaces, this includes the network @@ -672,71 +675,14 @@ export namespace Attachment { */ export interface Cluster { /** - *
The settings for the cluster. This parameter indicates whether CloudWatch Container Insights - * is enabled or disabled for a cluster.
- */ - settings?: ClusterSetting[]; - - /** - *The number of services that are running on the cluster in an ACTIVE
- * state. You can view these services with ListServices.
The resources attached to a cluster. When using a capacity provider with a cluster, - * the Auto Scaling plan that is created will be returned as a cluster attachment.
- */ - attachments?: Attachment[]; - - /** - *The capacity providers associated with the cluster.
+ *The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the cluster, the AWS account ID of the cluster owner, the cluster
namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test
.
The number of tasks in the cluster that are in the RUNNING
state.
A user-generated string that you use to identify your cluster.
*/ - runningTasksCount?: number; - - /** - *Additional information about your clusters that are separated by launch type, - * including:
- *runningEC2TasksCount
- *RunningFargateTasksCount
- *pendingEC2TasksCount
- *pendingFargateTasksCount
- *activeEC2ServiceCount
- *activeFargateServiceCount
- *drainingEC2ServiceCount
- *drainingFargateServiceCount
- *The default capacity provider strategy for the cluster. When services or tasks are run - * in the cluster with no launch type or capacity provider strategy specified, the default - * capacity provider strategy is used.
- */ - defaultCapacityProviderStrategy?: CapacityProviderStrategyItem[]; + clusterName?: string; /** *The status of the cluster. The following are the possible states that will be @@ -774,46 +720,58 @@ export interface Cluster { status?: string; /** - *
The number of tasks in the cluster that are in the PENDING
state.
The number of container instances registered into the cluster. This includes container
+ * instances in both ACTIVE
and DRAINING
status.
The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the cluster, the AWS account ID of the cluster owner, the cluster
namespace, and then the cluster name. For example, arn:aws:ecs:region:012345678910:cluster/test
.
The number of tasks in the cluster that are in the RUNNING
state.
A user-generated string that you use to identify your cluster.
+ *The number of tasks in the cluster that are in the PENDING
state.
The number of container instances registered into the cluster. This includes container
- * instances in both ACTIVE
and DRAINING
status.
The number of services that are running on the cluster in an ACTIVE
+ * state. You can view these services with ListServices.
The status of the capacity providers associated with the cluster. The following are - * the states that will be returned:
- *The available capacity providers for the cluster are updating. This occurs - * when the Auto Scaling plan is provisioning or deprovisioning.
- *The capacity providers have successfully updated.
- *The capacity provider updates failed.
- *Additional information about your clusters that are separated by launch type, + * including:
+ *runningEC2TasksCount
+ *RunningFargateTasksCount
+ *pendingEC2TasksCount
+ *pendingFargateTasksCount
+ *activeEC2ServiceCount
+ *activeFargateServiceCount
+ *drainingEC2ServiceCount
+ *drainingFargateServiceCount
+ *The metadata that you apply to the cluster to help you categorize and organize them. @@ -851,6 +809,51 @@ export interface Cluster { * */ tags?: Tag[]; + + /** + *
The settings for the cluster. This parameter indicates whether CloudWatch Container Insights + * is enabled or disabled for a cluster.
+ */ + settings?: ClusterSetting[]; + + /** + *The capacity providers associated with the cluster.
+ */ + capacityProviders?: string[]; + + /** + *The default capacity provider strategy for the cluster. When services or tasks are run + * in the cluster with no launch type or capacity provider strategy specified, the default + * capacity provider strategy is used.
+ */ + defaultCapacityProviderStrategy?: CapacityProviderStrategyItem[]; + + /** + *The resources attached to a cluster. When using a capacity provider with a cluster, + * the Auto Scaling plan that is created will be returned as a cluster attachment.
+ */ + attachments?: Attachment[]; + + /** + *The status of the capacity providers associated with the cluster. The following are + * the states that will be returned:
+ *The available capacity providers for the cluster are updating. This occurs + * when the Auto Scaling plan is provisioning or deprovisioning.
+ *The capacity providers have successfully updated.
+ *The capacity provider updates failed.
+ *The deployment circuit breaker can only be used for services using the rolling
+ * update (ECS
) deployment type that are not behind a Classic Load Balancer.
The deployment circuit breaker determines whether a + * service deployment will fail if the service can't reach a steady state. If enabled, a + * service deployment will transition to a failed state and stop launching new tasks. You + * can also enable Amazon ECS to roll back your service to the last completed deployment after a + * failure. For more information, see Rolling + * update in the Amazon Elastic Container Service Developer Guide.
+ */ +export interface DeploymentCircuitBreaker { + /** + *Whether to enable the deployment circuit breaker logic for the service.
+ */ + enable: boolean | undefined; + + /** + *Whether to enable Amazon ECS to roll back the service if a service deployment fails. If + * rollback is enabled, when a service deployment fails, the service is rolled back to the + * last deployment that completed successfully.
+ */ + rollback: boolean | undefined; +} + +export namespace DeploymentCircuitBreaker { + export const filterSensitiveLog = (obj: DeploymentCircuitBreaker): any => ({ + ...obj, + }); +} + /** *Optional deployment parameters that control how many tasks run during a deployment and * the ordering of stopping and starting tasks.
*/ export interface DeploymentConfiguration { /** - *If a service is using the rolling update (ECS
) deployment type, the
- * minimum healthy percent represents a lower limit on
- * the number of tasks in a service that must remain in the RUNNING
state
- * during a deployment, as a percentage of the desired number of tasks (rounded up to the
- * nearest integer), and while any container instances are in the DRAINING
- * state if the service contains tasks using the EC2 launch type. This
- * parameter enables you to deploy without using additional cluster capacity. For example,
- * if your service has a desired number of four tasks and a minimum healthy percent of 50%,
- * the scheduler may stop two existing tasks to free up cluster capacity before starting
- * two new tasks. Tasks for services that do not use a load balancer
- * are considered healthy if they are in the RUNNING
state; tasks for services
- * that do use a load balancer are considered healthy if they are in
- * the RUNNING
state and they are reported as healthy by the load balancer.
- * The default value for minimum healthy percent is 100%.
If a service is using the blue/green (CODE_DEPLOY
) or
- * EXTERNAL
deployment types and tasks that use the EC2
- * launch type, the minimum healthy percent value is set
- * to the default value and is used to define the lower limit on the number of the tasks in
- * the service that remain in the RUNNING
state while the container instances
- * are in the DRAINING
state. If the tasks in the service use the
- * Fargate launch type, the minimum healthy percent value is not used,
- * although it is returned when describing your service.
The deployment circuit breaker can only be used for services using the rolling
+ * update (ECS
) deployment type.
The deployment circuit breaker determines whether a + * service deployment will fail if the service can't reach a steady state. If deployment + * circuit breaker is enabled, a service deployment will transition to a failed state and + * stop launching new tasks. If rollback is enabled, when a service deployment fails, the + * service is rolled back to the last deployment that completed successfully.
*/ - minimumHealthyPercent?: number; + deploymentCircuitBreaker?: DeploymentCircuitBreaker; /** *If a service is using the rolling update (ECS
) deployment type, the
@@ -941,6 +963,32 @@ export interface DeploymentConfiguration {
* returned when describing your service.
If a service is using the rolling update (ECS
) deployment type, the
+ * minimum healthy percent represents a lower limit on
+ * the number of tasks in a service that must remain in the RUNNING
state
+ * during a deployment, as a percentage of the desired number of tasks (rounded up to the
+ * nearest integer), and while any container instances are in the DRAINING
+ * state if the service contains tasks using the EC2 launch type. This
+ * parameter enables you to deploy without using additional cluster capacity. For example,
+ * if your service has a desired number of four tasks and a minimum healthy percent of 50%,
+ * the scheduler may stop two existing tasks to free up cluster capacity before starting
+ * two new tasks. Tasks for services that do not use a load balancer
+ * are considered healthy if they are in the RUNNING
state; tasks for services
+ * that do use a load balancer are considered healthy if they are in
+ * the RUNNING
state and they are reported as healthy by the load balancer.
+ * The default value for minimum healthy percent is 100%.
If a service is using the blue/green (CODE_DEPLOY
) or
+ * EXTERNAL
deployment types and tasks that use the EC2
+ * launch type, the minimum healthy percent value is set
+ * to the default value and is used to define the lower limit on the number of the tasks in
+ * the service that remain in the RUNNING
state while the container instances
+ * are in the DRAINING
state. If the tasks in the service use the
+ * Fargate launch type, the minimum healthy percent value is not used,
+ * although it is returned when describing your service.
The name of the container (as it appears in a container definition) to associate with - * the load balancer.
- */ - containerName?: string; - - /** - *The port on the container to associate with the load balancer. This port must
- * correspond to a containerPort
in the task definition the tasks in the
- * service are using. For tasks that use the EC2 launch type, the container
- * instance they are launched on must allow ingress traffic on the hostPort
of
- * the port mapping.
The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or * task set.
@@ -1048,6 +1081,21 @@ export interface LoadBalancer { * or a Network Load Balancer the load balancer name parameter should be omitted. */ loadBalancerName?: string; + + /** + *The name of the container (as it appears in a container definition) to associate with + * the load balancer.
+ */ + containerName?: string; + + /** + *The port on the container to associate with the load balancer. This port must
+ * correspond to a containerPort
in the task definition the tasks in the
+ * service are using. For tasks that use the EC2 launch type, the container
+ * instance they are launched on must allow ingress traffic on the hostPort
of
+ * the port mapping.
An object representing the networking details for a task or service.
*/ export interface AwsVpcConfiguration { + /** + *The IDs of the subnets associated with the task or service. There is a limit of 16
+ * subnets that can be specified per AwsVpcConfiguration
.
All specified subnets must be from the same VPC.
+ *The IDs of the security groups associated with the task or service. If you do not
* specify a security group, the default security group for the VPC is used. There is a
@@ -1082,17 +1140,7 @@ export interface AwsVpcConfiguration {
* value is DISABLED
.
The IDs of the subnets associated with the task or service. There is a limit of 16
- * subnets that can be specified per AwsVpcConfiguration
.
All specified subnets must be from the same VPC.
- *The field to apply the placement strategy against. For the spread
- * placement strategy, valid values are instanceId
(or host
,
- * which has the same effect), or any platform or custom attribute that is applied to a
- * container instance, such as attribute:ecs.availability-zone
. For the
- * binpack
placement strategy, valid values are cpu
and
- * memory
. For the random
placement strategy, this field is
- * not used.
The type of placement strategy. The random
placement strategy randomly
* places tasks on available candidates. The spread
placement strategy spreads
@@ -1189,6 +1226,17 @@ export interface PlacementStrategy {
* the least amount of remaining memory (but still enough to run the task).
The field to apply the placement strategy against. For the spread
+ * placement strategy, valid values are instanceId
(or host
,
+ * which has the same effect), or any platform or custom attribute that is applied to a
+ * container instance, such as attribute:ecs.availability-zone
. For the
+ * binpack
placement strategy, valid values are cpu
and
+ * memory
. For the random
placement strategy, this field is
+ * not used.
The port value, already specified in the task definition, to be used for your service
- * discovery service. If the task definition your service task specifies uses the
- * bridge
or host
network mode, you must specify a
- * containerName
and containerPort
combination from the task
- * definition. If the task definition your service task specifies uses the
- * awsvpc
network mode and a type SRV DNS record is used, you must specify
- * either a containerName
and containerPort
combination or a
- * port
value, but not both.
The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is + * AWS Cloud Map. For more information, see CreateService.
*/ - containerPort?: number; + registryArn?: string; + + /** + *The port value used if your service discovery service specified an SRV record. This
+ * field may be used if both the awsvpc
network mode and SRV records are
+ * used.
The container name value, already specified in the task definition, to be used for @@ -1236,17 +1285,16 @@ export interface ServiceRegistry { containerName?: string; /** - *
The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is - * AWS Cloud Map. For more information, see CreateService.
- */ - registryArn?: string; - - /** - *The port value used if your service discovery service specified an SRV record. This
- * field may be used if both the awsvpc
network mode and SRV records are
- * used.
The port value, already specified in the task definition, to be used for your service
+ * discovery service. If the task definition your service task specifies uses the
+ * bridge
or host
network mode, you must specify a
+ * containerName
and containerPort
combination from the task
+ * definition. If the task definition your service task specifies uses the
+ * awsvpc
network mode and a type SRV DNS record is used, you must specify
+ * either a containerName
and containerPort
combination or a
+ * port
value, but not both.
The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. + * If you do not specify a cluster, the default cluster is assumed.
+ */ + cluster?: string; + + /** + *The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service names must be unique within + * a cluster, but you can have similarly named services in multiple clusters within a + * Region or across multiple Regions.
+ */ + serviceName: string | undefined; + + /** + *The family
and revision
(family:revision
) or
+ * full ARN of the task definition to run in your service. If a revision
is
+ * not specified, the latest ACTIVE
revision is used.
A task definition must be specified if the service is using either the
+ * ECS
or CODE_DEPLOY
deployment controllers.
A load balancer object representing the load balancers to use with your service. For * more information, see Service Load Balancing in the @@ -1310,60 +1380,50 @@ export interface CreateServiceRequest { serviceRegistries?: ServiceRegistry[]; /** - *
The deployment controller to use for the service.
+ *The number of instantiations of the specified task definition to place and keep + * running on your cluster.
+ *This is required if schedulingStrategy
is REPLICA
or is not
+ * specified. If schedulingStrategy
is DAEMON
then this is not
+ * required.
Specifies whether to propagate the tags from the task definition or the service to the - * tasks in the service. If no value is specified, the tags are not propagated. Tags can - * only be propagated to the tasks within the service during service creation. To add tags - * to a task after service creation, use the TagResource API - * action.
+ *Unique, case-sensitive identifier that you provide to ensure the idempotency of the + * request. Up to 32 ASCII characters are allowed.
*/ - propagateTags?: PropagateTags | string; + clientToken?: string; /** - *The scheduling strategy to use for the service. For more information, see Services.
- *There are two service scheduler strategies available:
- *
- * REPLICA
-The replica scheduling strategy places and
- * maintains the desired number of tasks across your cluster. By default, the
- * service scheduler spreads tasks across Availability Zones. You can use task
- * placement strategies and constraints to customize task placement decisions. This
- * scheduler strategy is required if the service is using the
- * CODE_DEPLOY
or EXTERNAL
deployment controller
- * types.
- * DAEMON
-The daemon scheduling strategy deploys exactly one
- * task on each active container instance that meets all of the task placement
- * constraints that you specify in your cluster. The service scheduler also
- * evaluates the task placement constraints for running tasks and will stop tasks
- * that do not meet the placement constraints. When you're using this strategy, you
- * don't need to specify a desired number of tasks, a task placement strategy, or
- * use Service Auto Scaling policies.
Tasks using the Fargate launch type or the
- * CODE_DEPLOY
or EXTERNAL
deployment controller
- * types don't support the DAEMON
scheduling strategy.
The launch type on which to run your service. For more information, see Amazon ECS + * Launch Types in the Amazon Elastic Container Service Developer Guide.
+ *If a launchType
is specified, the capacityProviderStrategy
+ * parameter must be omitted.
The network configuration for the service. This parameter is required for task
- * definitions that use the awsvpc
network mode to receive their own elastic
- * network interface, and it is not supported for other network modes. For more
- * information, see Task Networking
- * in the Amazon Elastic Container Service Developer Guide.
The capacity provider strategy to use for the service.
+ *A capacity provider strategy consists of one or more capacity providers along with the
+ * base
and weight
to assign to them. A capacity provider
+ * must be associated with the cluster to be used in a capacity provider strategy. The
+ * PutClusterCapacityProviders API is used to associate a capacity
+ * provider with a cluster. Only capacity providers with an ACTIVE
or
+ * UPDATING
status can be used.
If a capacityProviderStrategy
is specified, the launchType
+ * parameter must be omitted. If no capacityProviderStrategy
or
+ * launchType
is specified, the
+ * defaultCapacityProviderStrategy
for the cluster is used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity + * provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
+ *To use a AWS Fargate capacity provider, specify either the FARGATE
or
+ * FARGATE_SPOT
capacity providers. The AWS Fargate capacity providers are
+ * available to all accounts and only need to be associated with a cluster to be
+ * used.
The PutClusterCapacityProviders API operation is used to update the + * list of available capacity providers for a cluster after the cluster is created.
*/ - networkConfiguration?: NetworkConfiguration; + capacityProviderStrategy?: CapacityProviderStrategyItem[]; /** *The platform version that your tasks in the service are running on. A platform version @@ -1374,12 +1434,6 @@ export interface CreateServiceRequest { */ platformVersion?: string; - /** - *
The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. - * If you do not specify a cluster, the default cluster is assumed.
- */ - cluster?: string; - /** *The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your * load balancer on your behalf. This parameter is only permitted if you are using a load @@ -1411,11 +1465,78 @@ export interface CreateServiceRequest { deploymentConfiguration?: DeploymentConfiguration; /** - *
The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service names must be unique within - * a cluster, but you can have similarly named services in multiple clusters within a - * Region or across multiple Regions.
+ *An array of placement constraint objects to use for tasks in your service. You can + * specify a maximum of 10 constraints per task (this limit includes constraints in the + * task definition and those specified at runtime).
*/ - serviceName: string | undefined; + placementConstraints?: PlacementConstraint[]; + + /** + *The placement strategy objects to use for tasks in your service. You can specify a + * maximum of five strategy rules per service.
+ */ + placementStrategy?: PlacementStrategy[]; + + /** + *The network configuration for the service. This parameter is required for task
+ * definitions that use the awsvpc
network mode to receive their own elastic
+ * network interface, and it is not supported for other network modes. For more
+ * information, see Task Networking
+ * in the Amazon Elastic Container Service Developer Guide.
The period of time, in seconds, that the Amazon ECS service scheduler should ignore
+ * unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used
+ * when your service is configured to use a load balancer. If your service has a load
+ * balancer defined and you don't specify a health check grace period value, the default
+ * value of 0
is used.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you + * can specify a health check grace period of up to 2,147,483,647 seconds. During that + * time, the Amazon ECS service scheduler ignores health check status. This grace period can + * prevent the service scheduler from marking tasks as unhealthy and stopping them before + * they have time to come up.
+ */ + healthCheckGracePeriodSeconds?: number; + + /** + *The scheduling strategy to use for the service. For more information, see Services.
+ *There are two service scheduler strategies available:
+ *
+ * REPLICA
-The replica scheduling strategy places and
+ * maintains the desired number of tasks across your cluster. By default, the
+ * service scheduler spreads tasks across Availability Zones. You can use task
+ * placement strategies and constraints to customize task placement decisions. This
+ * scheduler strategy is required if the service is using the
+ * CODE_DEPLOY
or EXTERNAL
deployment controller
+ * types.
+ * DAEMON
-The daemon scheduling strategy deploys exactly one
+ * task on each active container instance that meets all of the task placement
+ * constraints that you specify in your cluster. The service scheduler also
+ * evaluates the task placement constraints for running tasks and will stop tasks
+ * that do not meet the placement constraints. When you're using this strategy, you
+ * don't need to specify a desired number of tasks, a task placement strategy, or
+ * use Service Auto Scaling policies.
Tasks using the Fargate launch type or the
+ * CODE_DEPLOY
or EXTERNAL
deployment controller
+ * types don't support the DAEMON
scheduling strategy.
The deployment controller to use for the service.
+ */ + deploymentController?: DeploymentController; /** *The metadata that you apply to the service to help you categorize and organize them. @@ -1463,87 +1584,14 @@ export interface CreateServiceRequest { enableECSManagedTags?: boolean; /** - *
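// A sketch pulling the CreateServiceRequest pieces above together: a capacity provider
// strategy (so launchType is omitted), awsvpc networking, and the deployment circuit breaker.
// It assumes the existing CreateServiceCommand from this client; names, subnet, and security
// group IDs are placeholders.
import { CreateServiceCommand, ECSClient } from "@aws-sdk/client-ecs";

const serviceClient = new ECSClient({ region: "us-east-1" });

export const createWebService = async () => {
  return serviceClient.send(
    new CreateServiceCommand({
      cluster: "demo",
      serviceName: "web",
      taskDefinition: "web:1", // family:revision
      desiredCount: 4,
      capacityProviderStrategy: [{ capacityProvider: "FARGATE", weight: 1 }],
      networkConfiguration: {
        awsvpcConfiguration: {
          subnets: ["subnet-0example1", "subnet-0example2"],
          securityGroups: ["sg-0example"],
          assignPublicIp: "DISABLED",
        },
      },
      deploymentConfiguration: {
        deploymentCircuitBreaker: { enable: true, rollback: true },
      },
      schedulingStrategy: "REPLICA",
    })
  );
};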
The placement strategy objects to use for tasks in your service. You can specify a - * maximum of five strategy rules per service.
+ *Specifies whether to propagate the tags from the task definition or the service to the + * tasks in the service. If no value is specified, the tags are not propagated. Tags can + * only be propagated to the tasks within the service during service creation. To add tags + * to a task after service creation, use the TagResource API + * action.
*/ - placementStrategy?: PlacementStrategy[]; - - /** - *The number of instantiations of the specified task definition to place and keep - * running on your cluster.
- *This is required if schedulingStrategy
is REPLICA
or is not
- * specified. If schedulingStrategy
is DAEMON
then this is not
- * required.
Unique, case-sensitive identifier that you provide to ensure the idempotency of the - * request. Up to 32 ASCII characters are allowed.
- */ - clientToken?: string; - - /** - *An array of placement constraint objects to use for tasks in your service. You can - * specify a maximum of 10 constraints per task (this limit includes constraints in the - * task definition and those specified at runtime).
- */ - placementConstraints?: PlacementConstraint[]; - - /** - *The family
and revision
(family:revision
) or
- * full ARN of the task definition to run in your service. If a revision
is
- * not specified, the latest ACTIVE
revision is used.
A task definition must be specified if the service is using either the
- * ECS
or CODE_DEPLOY
deployment controllers.
The period of time, in seconds, that the Amazon ECS service scheduler should ignore
- * unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used
- * when your service is configured to use a load balancer. If your service has a load
- * balancer defined and you don't specify a health check grace period value, the default
- * value of 0
is used.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you - * can specify a health check grace period of up to 2,147,483,647 seconds. During that - * time, the Amazon ECS service scheduler ignores health check status. This grace period can - * prevent the service scheduler from marking tasks as unhealthy and stopping them before - * they have time to come up.
- */ - healthCheckGracePeriodSeconds?: number; - - /** - *The capacity provider strategy to use for the service.
- *A capacity provider strategy consists of one or more capacity providers along with the
- * base
and weight
to assign to them. A capacity provider
- * must be associated with the cluster to be used in a capacity provider strategy. The
- * PutClusterCapacityProviders API is used to associate a capacity
- * provider with a cluster. Only capacity providers with an ACTIVE
or
- * UPDATING
status can be used.
If a capacityProviderStrategy
is specified, the launchType
- * parameter must be omitted. If no capacityProviderStrategy
or
- * launchType
is specified, the
- * defaultCapacityProviderStrategy
for the cluster is used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity - * provider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
- *To use a AWS Fargate capacity provider, specify either the FARGATE
or
- * FARGATE_SPOT
capacity providers. The AWS Fargate capacity providers are
- * available to all accounts and only need to be associated with a cluster to be
- * used.
The PutClusterCapacityProviders API operation is used to update the - * list of available capacity providers for a cluster after the cluster is created.
- */ - capacityProviderStrategy?: CapacityProviderStrategyItem[]; - - /** - *The launch type on which to run your service. For more information, see Amazon ECS - * Launch Types in the Amazon Elastic Container Service Developer Guide.
- *If a launchType
is specified, the capacityProviderStrategy
- * parameter must be omitted.
The details of an Amazon ECS service deployment. This is used only when a service uses the
* ECS
deployment controller type.
The capacity provider strategy that the deployment is using.
+ *The ID of the deployment.
*/ - capacityProviderStrategy?: CapacityProviderStrategyItem[]; + id?: string; /** - *The launch type the tasks in the service are using. For more information, see Amazon ECS - * Launch Types in the Amazon Elastic Container Service Developer Guide.
+ *The status of the deployment. The following describes each state:
+ *The most recent deployment of a service.
+ *A service deployment that still has running tasks, but are in the process
+ * of being replaced with a new PRIMARY
deployment.
A deployment that has been completely replaced.
+ *The Unix timestamp for when the service deployment was last updated.
+ *The most recent task definition that was specified for the tasks in the service to + * use.
*/ - updatedAt?: Date; + taskDefinition?: string; /** - *The ID of the deployment.
+ *The most recent desired count of tasks that was specified for the service to deploy or + * maintain.
*/ - id?: string; + desiredCount?: number; + + /** + *The number of tasks in the deployment that are in the PENDING
+ * status.
The number of tasks in the deployment that are in the RUNNING
@@ -1584,47 +1660,37 @@ export interface Deployment {
runningCount?: number;
/**
- *
The Unix timestamp for when the service deployment was created.
+ *The number of consecutively failed tasks in the deployment. A task is considered a
+ * failure if the service scheduler can't launch the task, the task doesn't transition to a
+ * RUNNING
state, or if it fails any of its defined health checks and is
+ * stopped.
Once a service deployment has one or more successfully running tasks, the failed + * task count resets to zero and stops being evaluated.
+ *The number of tasks in the deployment that are in the PENDING
- * status.
The Unix timestamp for when the service deployment was created.
*/ - pendingCount?: number; + createdAt?: Date; /** - *The VPC subnet and security group configuration for tasks that receive their own
- * elastic network interface by using the awsvpc
networking mode.
The Unix timestamp for when the service deployment was last updated.
*/ - networkConfiguration?: NetworkConfiguration; + updatedAt?: Date; /** - *The most recent desired count of tasks that was specified for the service to deploy or - * maintain.
+ *The capacity provider strategy that the deployment is using.
*/ - desiredCount?: number; + capacityProviderStrategy?: CapacityProviderStrategyItem[]; /** - *The status of the deployment. The following describes each state:
- *The most recent deployment of a service.
- *A service deployment that still has running tasks, but are in the process
- * of being replaced with a new PRIMARY
deployment.
A deployment that has been completely replaced.
- *The launch type the tasks in the service are using. For more information, see Amazon ECS + * Launch Types in the Amazon Elastic Container Service Developer Guide.
*/ - status?: string; + launchType?: LaunchType | string; /** *The platform version on which your tasks in the service are running. A platform @@ -1636,10 +1702,30 @@ export interface Deployment { platformVersion?: string; /** - *
The most recent task definition that was specified for the tasks in the service to - * use.
+ *The VPC subnet and security group configuration for tasks that receive their own
+ * elastic network interface by using the awsvpc
networking mode.
The rolloutState
of a service is only returned for services that use
+ * the rolling update (ECS
) deployment type that are not behind a
+ * Classic Load Balancer.
The rollout state of the deployment. When a service deployment is started, it begins
+ * in an IN_PROGRESS
state. When the service reaches a steady state, the
+ * deployment will transition to a COMPLETED
state. If the service fails to
+ * reach a steady state and circuit breaker is enabled, the deployment will transition to a
+ * FAILED
state. A deployment in FAILED
state will launch no
+ * new tasks. For more information, see DeploymentCircuitBreaker.
A description of the rollout state of a deployment.
+ */ + rolloutStateReason?: string; } export namespace Deployment { @@ -1683,16 +1769,16 @@ export enum ScaleUnit { * in the task set. */ export interface Scale { - /** - *The unit of measure for the scale value.
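// A sketch of reading the new Deployment.rolloutState fields after a rolling update, via the
// existing DescribeServicesCommand; cluster and service names are placeholders.
import { DescribeServicesCommand, ECSClient } from "@aws-sdk/client-ecs";

const rolloutClient = new ECSClient({ region: "us-east-1" });

export const checkRollout = async () => {
  const { services } = await rolloutClient.send(
    new DescribeServicesCommand({ cluster: "demo", services: ["web"] })
  );
  const primary = services?.[0]?.deployments?.find((d) => d.status === "PRIMARY");
  if (primary?.rolloutState === "FAILED") {
    // With the circuit breaker enabled, a FAILED deployment launches no new tasks.
    console.error(primary.rolloutStateReason);
  }
};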
- */ - unit?: ScaleUnit | string; - /** *The value, specified as a percent total of a service's desiredCount
, to
* scale the task set. Accepted values are numbers between 0 and 100.
The unit of measure for the scale value.
+ */ + unit?: ScaleUnit | string; } export namespace Scale { @@ -1713,17 +1799,33 @@ export enum StabilityStatus { */ export interface TaskSet { /** - *The details of the service discovery registries to assign to this task set. For more - * information, see Service - * Discovery.
+ *The ID of the task set.
*/ - serviceRegistries?: ServiceRegistry[]; + id?: string; + + /** + *The Amazon Resource Name (ARN) of the task set.
+ */ + taskSetArn?: string; /** *The Amazon Resource Name (ARN) of the service the task set exists in.
*/ serviceArn?: string; + /** + *The Amazon Resource Name (ARN) of the cluster that the service that hosts the task set exists + * in.
+ */ + clusterArn?: string; + + /** + *The tag specified when a task set is started. If the task set is created by an AWS CodeDeploy
+ * deployment, the startedBy
parameter is CODE_DEPLOY
. For a task
+ * set created for an external deployment, the startedBy field isn't used.
The external ID associated with the task set.
*If a task set is created by an AWS CodeDeploy deployment, the externalId
parameter
@@ -1735,18 +1837,29 @@ export interface TaskSet {
externalId?: string;
/**
- *
Details on a load balancer that is used with a task set.
+ *The status of the task set. The following describes each state:
+ *The task set is serving production traffic.
+ *The task set is not serving production traffic.
+ *The tasks in the task set are being stopped and their corresponding + * targets are being deregistered from their target group.
+ *The platform version on which the tasks in the task set are running. A platform
- * version is only specified for tasks using the Fargate launch type. If one
- * is not specified, the LATEST
platform version is used by default. For more
- * information, see AWS Fargate Platform
- * Versions in the Amazon Elastic Container Service Developer Guide.
The task definition the task set is using.
*/ - platformVersion?: string; + taskDefinition?: string; /** *The computed desired count for the task set. This is calculated by multiplying the @@ -1757,20 +1870,25 @@ export interface TaskSet { computedDesiredCount?: number; /** - *
The ID of the task set.
+ *The number of tasks in the task set that are in the PENDING
status during
+ * a deployment. A task in the PENDING
state is preparing to enter the
+ * RUNNING
state. A task set enters the PENDING
status when
+ * it launches for the first time or when it is restarted after being in the
+ * STOPPED
state.
The task definition the task set is using.
+ *The number of tasks in the task set that are in the RUNNING
status during
+ * a deployment. A task in the RUNNING
state is running and ready for
+ * use.
The launch type the tasks in the task set are using. For more information, see Amazon ECS - * Launch Types in the Amazon Elastic Container Service Developer Guide.
+ *The Unix timestamp for when the task set was created.
*/ - launchType?: LaunchType | string; + createdAt?: Date; /** *The Unix timestamp for when the task set was last updated.
@@ -1778,92 +1896,41 @@ export interface TaskSet { updatedAt?: Date; /** - *The capacity provider strategy associated with the task set.
+ *The launch type the tasks in the task set are using. For more information, see Amazon ECS + * Launch Types in the Amazon Elastic Container Service Developer Guide.
*/ - capacityProviderStrategy?: CapacityProviderStrategyItem[]; + launchType?: LaunchType | string; /** - *The Unix timestamp for when the task set was created.
- */ - createdAt?: Date; - - /** - *The number of tasks in the task set that are in the PENDING
status during
- * a deployment. A task in the PENDING
state is preparing to enter the
- * RUNNING
state. A task set enters the PENDING
status when
- * it launches for the first time or when it is restarted after being in the
- * STOPPED
state.
The capacity provider strategy associated with the task set.
*/ - pendingCount?: number; + capacityProviderStrategy?: CapacityProviderStrategyItem[]; /** - *The metadata that you apply to the task set to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define.
- *The following basic restrictions apply to tags:
- *Maximum number of tags per resource - 50
- *For each resource, each tag key must be unique, and each tag key can have only - * one value.
- *Maximum key length - 128 Unicode characters in UTF-8
- *Maximum value length - 256 Unicode characters in UTF-8
- *If your tagging schema is used across multiple services and resources, - * remember that other services may have restrictions on allowed characters. - * Generally allowed characters are: letters, numbers, and spaces representable in - * UTF-8, and the following characters: + - = . _ : / @.
- *Tag keys and values are case-sensitive.
- *Do not use aws:
, AWS:
, or any upper or lowercase
- * combination of such as a prefix for either keys or values as it is reserved for
- * AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with
- * this prefix do not count against your tags per resource limit.
The platform version on which the tasks in the task set are running. A platform
+ * version is only specified for tasks using the Fargate launch type. If one
+ * is not specified, the LATEST
platform version is used by default. For more
+ * information, see AWS Fargate Platform
+ * Versions in the Amazon Elastic Container Service Developer Guide.
The status of the task set. The following describes each state:
- *The task set is serving production traffic.
- *The task set is not serving production traffic.
- *The tasks in the task set are being stopped and their corresponding - * targets are being deregistered from their target group.
- *The network configuration for the task set.
*/ - status?: string; + networkConfiguration?: NetworkConfiguration; /** - *The number of tasks in the task set that are in the RUNNING
status during
- * a deployment. A task in the RUNNING
state is running and ready for
- * use.
Details on a load balancer that is used with a task set.
*/ - runningCount?: number; + loadBalancers?: LoadBalancer[]; /** - *The Amazon Resource Name (ARN) of the task set.
+ *The details of the service discovery registries to assign to this task set. For more + * information, see Service + * Discovery.
*/ - taskSetArn?: string; + serviceRegistries?: ServiceRegistry[]; /** *A floating-point percentage of the desired number of tasks to place and keep running @@ -1903,22 +1970,41 @@ export interface TaskSet { stabilityStatusAt?: Date; /** - *
The Amazon Resource Name (ARN) of the cluster that the service that hosts the task set exists - * in.
- */ - clusterArn?: string; - - /** - *The network configuration for the task set.
- */ - networkConfiguration?: NetworkConfiguration; - - /** - *The tag specified when a task set is started. If the task set is created by an AWS CodeDeploy
- * deployment, the startedBy
parameter is CODE_DEPLOY
. For a task
- * set created for an external deployment, the startedBy field isn't used.
The metadata that you apply to the task set to help you categorize and organize them. + * Each tag consists of a key and an optional value, both of which you define.
+ *The following basic restrictions apply to tags:
+ *Maximum number of tags per resource - 50
+ *For each resource, each tag key must be unique, and each tag key can have only + * one value.
+ *Maximum key length - 128 Unicode characters in UTF-8
+ *Maximum value length - 256 Unicode characters in UTF-8
+ *If your tagging schema is used across multiple services and resources, + * remember that other services may have restrictions on allowed characters. + * Generally allowed characters are: letters, numbers, and spaces representable in + * UTF-8, and the following characters: + - = . _ : / @.
+ *Tag keys and values are case-sensitive.
+ *Do not use aws:, AWS:, or any upper or lowercase
+ * combination of these as a prefix for either keys or values, as it is reserved for
+ * AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with
+ * this prefix do not count against your tags per resource limit. A conforming tag
+ * list is sketched just below this definition.
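// Illustrative sketch only (not generated code): a tag list that respects the limits
// documented above -- at most 50 tags per resource, unique keys up to 128 characters,
// values up to 256 characters, and no "aws:"/"AWS:" prefix. Assumes the Tag shape
// ({ key?: string; value?: string }) defined elsewhere in this models file.
const taskSetTags: Tag[] = [
  { key: "environment", value: "production" }, // each key appears only once per resource
  { key: "team", value: "payments" },
  // { key: "aws:createdBy", value: "pipeline" } // invalid: the "aws:" prefix is reserved
];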
Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL
- * deployment. An Amazon ECS task set includes details such as the desired number of tasks, how
- * many tasks are running, and whether the task set serves production traffic.
The ARN that identifies the service. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the service, the AWS account ID of the service owner, the service
namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service
.
The placement strategy that determines how tasks for the service are placed.
+ *The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service names must be unique within + * a cluster, but you can have similarly named services in multiple clusters within a + * Region or across multiple Regions.
*/ - placementStrategy?: PlacementStrategy[]; + serviceName?: string; /** - *The status of the service. The valid values are ACTIVE
,
- * DRAINING
, or INACTIVE
.
The Amazon Resource Name (ARN) of the cluster that hosts the service.
*/ - status?: string; + clusterArn?: string; /** *A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the @@ -1957,17 +2042,46 @@ export interface Service { loadBalancers?: LoadBalancer[]; /** - *
Specifies whether to propagate the tags from the task definition or the service to the - * task. If no value is specified, the tags are not propagated.
+ *The details of the service discovery registries to assign to this service. For more + * information, see Service + * Discovery.
*/ - propagateTags?: PropagateTags | string; + serviceRegistries?: ServiceRegistry[]; /** - *The task definition to use for tasks in the service. This value is specified when the - * service is created with CreateService, and it can be modified with - * UpdateService.
+ *The status of the service. The valid values are ACTIVE
,
+ * DRAINING
, or INACTIVE
.
The desired number of instantiations of the task definition to keep running on the + * service. This value is specified when the service is created with CreateService, and it can be modified with UpdateService.
+ */ + desiredCount?: number; + + /** + *The number of tasks in the cluster that are in the RUNNING
state.
The number of tasks in the cluster that are in the PENDING
state.
The launch type on which your service is running. If no value is specified, it will
+ * default to EC2
. Valid values include EC2
and
+ * FARGATE
. For more information, see Amazon ECS
+ * Launch Types in the Amazon Elastic Container Service Developer Guide.
The capacity provider strategy associated with the service.
+ */ + capacityProviderStrategy?: CapacityProviderStrategyItem[]; /** *The platform version on which to run your service. A platform version is only @@ -1978,6 +2092,26 @@ export interface Service { */ platformVersion?: string; + /** + *
The task definition to use for tasks in the service. This value is specified when the + * service is created with CreateService, and it can be modified with + * UpdateService.
+ */ + taskDefinition?: string; + + /** + *Optional deployment parameters that control how many tasks run during the deployment + * and the ordering of stopping and starting tasks.
+ */ + deploymentConfiguration?: DeploymentConfiguration; + + /** + *Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL
+ * deployment. An Amazon ECS task set includes details such as the desired number of tasks, how
+ * many tasks are running, and whether the task set serves production traffic.
The current state of deployments for the service.
*/ @@ -1990,50 +2124,83 @@ export interface Service { roleArn?: string; /** - *The ARN that identifies the service. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the service, the AWS account ID of the service owner, the service
namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service
.
The event stream for your service. A maximum of 100 of the latest events are + * displayed.
*/ - serviceArn?: string; + events?: ServiceEvent[]; /** - *The principal that created the service.
+ *The Unix timestamp for when the service was created.
*/ - createdBy?: string; + createdAt?: Date; /** - *The deployment controller type the service is using. When using the DescribeServices
- * API, this field is omitted if the service is using the ECS
deployment
- * controller type.
The placement constraints for the tasks in the service.
*/ - deploymentController?: DeploymentController; + placementConstraints?: PlacementConstraint[]; /** - *The details of the service discovery registries to assign to this service. For more - * information, see Service - * Discovery.
+ *The placement strategy that determines how tasks for the service are placed.
*/ - serviceRegistries?: ServiceRegistry[]; + placementStrategy?: PlacementStrategy[]; /** - *Optional deployment parameters that control how many tasks run during the deployment - * and the ordering of stopping and starting tasks.
+ *The VPC subnet and security group configuration for tasks that receive their own
+ * elastic network interface by using the awsvpc
networking mode.
The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ *The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy + * Elastic Load Balancing target health checks after a task has first started.
*/ - clusterArn?: string; + healthCheckGracePeriodSeconds?: number; /** - *The metadata that you apply to the service to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define.
- *The following basic restrictions apply to tags:
- *Maximum number of tags per resource - 50
- *The scheduling strategy to use for the service. For more information, see Services.
+ *There are two service scheduler strategies available:
+ *For each resource, each tag key must be unique, and each tag key can have only + *
+ * REPLICA
-The replica scheduling strategy places and
+ * maintains the desired number of tasks across your cluster. By default, the
+ * service scheduler spreads tasks across Availability Zones. You can use task
+ * placement strategies and constraints to customize task placement
+ * decisions.
+ * DAEMON
-The daemon scheduling strategy deploys exactly one
+ * task on each active container instance that meets all of the task placement
+ * constraints that you specify in your cluster. The service scheduler also
+ * evaluates the task placement constraints for running tasks and will stop tasks
+ * that do not meet the placement constraints.
Fargate tasks do not support the DAEMON
+ * scheduling strategy.
The deployment controller type the service is using. When using the DescribeServices
+ * API, this field is omitted if the service is using the ECS
deployment
+ * controller type.
The metadata that you apply to the service to help you categorize and organize them. + * Each tag consists of a key and an optional value, both of which you define.
+ *The following basic restrictions apply to tags:
+ *Maximum number of tags per resource - 50
+ *For each resource, each tag key must be unique, and each tag key can have only * one value.
*The principal that created the service.
+ */ + createdBy?: string; + /** *Specifies whether to enable Amazon ECS managed tags for the tasks in the service. For more
* information, see Tagging Your Amazon ECS
@@ -2069,96 +2241,10 @@ export interface Service {
enableECSManagedTags?: boolean;
/**
- * The number of tasks in the cluster that are in the The number of tasks in the cluster that are in the The desired number of instantiations of the task definition to keep running on the
- * service. This value is specified when the service is created with CreateService, and it can be modified with UpdateService. The placement constraints for the tasks in the service. The Unix timestamp for when the service was created. The capacity provider strategy associated with the service. The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy
- * Elastic Load Balancing target health checks after a task has first started. The launch type on which your service is running. If no value is specified, it will
- * default to The event stream for your service. A maximum of 100 of the latest events are
- * displayed. The VPC subnet and security group configuration for tasks that receive their own
- * elastic network interface by using the The name of your service. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. Service names must be unique within
- * a cluster, but you can have similarly named services in multiple clusters within a
- * Region or across multiple Regions. The scheduling strategy to use for the service. For more information, see Services. There are two service scheduler strategies available:
- *
- * Fargate tasks do not support the Specifies whether to propagate the tags from the task definition or the service to the
+ * task. If no value is specified, the tags are not propagated. The short name or full Amazon Resource Name (ARN) of the service to create the task set in. The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the
+ * task set in. An optional non-unique tag that identifies this task set in external systems. If the
+ * task set is associated with a service discovery registry, the tasks in this task set
+ * will have the The task definition for the tasks in the task set to use. An object representing the network configuration for a task or service. A load balancer object representing the load balancer to use with the task set. The
+ * supported load balancer types are either an Application Load Balancer or a Network Load Balancer. The details of the service discovery registries to assign to this task set. For more
+ * information, see Service
+ * Discovery. The launch type that new tasks in the task set will use. For more information, see
+ * Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide. If a The capacity provider strategy to use for the task set. A capacity provider strategy consists of one or more capacity providers along with the
@@ -2257,12 +2393,25 @@ export interface CreateTaskSetRequest {
*/
capacityProviderStrategy?: CapacityProviderStrategyItem[];
+ /**
+ * The platform version that the tasks in the task set should use. A platform version is
+ * specified only for tasks using the Fargate launch type. If one isn't
+ * specified, the A floating-point percentage of the desired number of tasks to place and keep running
* in the task set. Unique, case-sensitive identifier that you provide to ensure the idempotency of the
+ * request. Up to 32 ASCII characters are allowed. The metadata that you apply to the task set to help you categorize and organize them.
* Each tag consists of a key and an optional value, both of which you define. When a
@@ -2300,101 +2449,38 @@ export interface CreateTaskSetRequest {
* RUNNING
state.PENDING
state.EC2
. Valid values include EC2
and
- * FARGATE
. For more information, see Amazon ECS
- * Launch Types in the Amazon Elastic Container Service Developer Guide.awsvpc
networking mode.
- *
+ * REPLICA
-The replica scheduling strategy places and
- * maintains the desired number of tasks across your cluster. By default, the
- * service scheduler spreads tasks across Availability Zones. You can use task
- * placement strategies and constraints to customize task placement
- * decisions.DAEMON
-The daemon scheduling strategy deploys exactly one
- * task on each active container instance that meets all of the task placement
- * constraints that you specify in your cluster. The service scheduler also
- * evaluates the task placement constraints for running tasks and will stop tasks
- * that do not meet the placement constraints.DAEMON
- * scheduling strategy.ECS_TASK_SET_EXTERNAL_ID
AWS Cloud Map attribute set to the provided
+ * value.launchType
is specified, the capacityProviderStrategy
+ * parameter must be omitted.LATEST
platform version is used by default.
The launch type that new tasks in the task set will use. For more information, see - * Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.
- *If a launchType
is specified, the capacityProviderStrategy
- * parameter must be omitted.
Unique, case-sensitive identifier that you provide to ensure the idempotency of the - * request. Up to 32 ASCII characters are allowed.
- */ - clientToken?: string; +export namespace CreateTaskSetRequest { + export const filterSensitiveLog = (obj: CreateTaskSetRequest): any => ({ + ...obj, + }); +} +export interface CreateTaskSetResponse { /** - *The platform version that the tasks in the task set should use. A platform version is
- * specified only for tasks using the Fargate launch type. If one isn't
- * specified, the LATEST
platform version is used by default.
Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL
+ * deployment. An Amazon ECS task set includes details such as the desired number of tasks, how
+ * many tasks are running, and whether the task set serves production traffic.
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service to create the - * task set in.
- */ - cluster: string | undefined; +export namespace CreateTaskSetResponse { + export const filterSensitiveLog = (obj: CreateTaskSetResponse): any => ({ + ...obj, + }); +} - /** - *A load balancer object representing the load balancer to use with the task set. The - * supported load balancer types are either an Application Load Balancer or a Network Load Balancer.
- */ - loadBalancers?: LoadBalancer[]; - - /** - *An optional non-unique tag that identifies this task set in external systems. If the
- * task set is associated with a service discovery registry, the tasks in this task set
- * will have the ECS_TASK_SET_EXTERNAL_ID
AWS Cloud Map attribute set to the provided
- * value.
An object representing the network configuration for a task or service.
- */ - networkConfiguration?: NetworkConfiguration; - - /** - *The short name or full Amazon Resource Name (ARN) of the service to create the task set in.
- */ - service: string | undefined; - - /** - *The task definition for the tasks in the task set to use.
- */ - taskDefinition: string | undefined; - - /** - *The details of the service discovery registries to assign to this task set. For more - * information, see Service - * Discovery.
- */ - serviceRegistries?: ServiceRegistry[]; -} - -export namespace CreateTaskSetRequest { - export const filterSensitiveLog = (obj: CreateTaskSetRequest): any => ({ - ...obj, - }); -} - -export interface CreateTaskSetResponse { - /** - *Information about a set of Amazon ECS tasks in either an AWS CodeDeploy or an EXTERNAL
- * deployment. An Amazon ECS task set includes details such as the desired number of tasks, how
- * many tasks are running, and whether the task set serves production traffic.
The specified service is not active. You can't update a service that is inactive. If - * you have previously deleted a service, you can re-create it with CreateService.
- */ -export interface ServiceNotActiveException extends __SmithyException, $MetadataBearer { - name: "ServiceNotActiveException"; - $fault: "client"; - message?: string; -} +/** + *The specified service is not active. You can't update a service that is inactive. If + * you have previously deleted a service, you can re-create it with CreateService.
+ */ +export interface ServiceNotActiveException extends __SmithyException, $MetadataBearer { + name: "ServiceNotActiveException"; + $fault: "client"; + message?: string; +} export namespace ServiceNotActiveException { export const filterSensitiveLog = (obj: ServiceNotActiveException): any => ({ @@ -2428,15 +2514,6 @@ export enum SettingName { } export interface DeleteAccountSettingRequest { - /** - *The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you - * specify the root user, it disables the account setting for all IAM users, IAM roles, and - * the root user of the account unless an IAM user or role explicitly overrides these - * settings. If this field is omitted, the setting is changed only for the authenticated - * user.
- */ - principalArn?: string; - /** *The resource name for which to disable the account setting. If
* serviceLongArnFormat
is specified, the ARN for your Amazon ECS services is
@@ -2447,6 +2524,15 @@ export interface DeleteAccountSettingRequest {
* instances is affected.
The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you + * specify the root user, it disables the account setting for all IAM users, IAM roles, and + * the root user of the account unless an IAM user or role explicitly overrides these + * settings. If this field is omitted, the setting is changed only for the authenticated + * user.
+ */ + principalArn?: string; } export namespace DeleteAccountSettingRequest { @@ -2459,12 +2545,6 @@ export namespace DeleteAccountSettingRequest { *The current account setting for a resource.
*/ export interface Setting { - /** - *The ARN of the principal, which can be an IAM user, IAM role, or the root user. If - * this field is omitted, the authenticated user is assumed.
- */ - principalArn?: string; - /** *The Amazon ECS resource name.
*/ @@ -2474,6 +2554,12 @@ export interface Setting { *Whether the account setting is enabled or disabled for the specified resource.
*/ value?: string; + + /** + *The ARN of the principal, which can be an IAM user, IAM role, or the root user. If + * this field is omitted, the authenticated user is assumed.
+ */ + principalArn?: string; } export namespace Setting { @@ -2505,6 +2591,13 @@ export enum TargetType { * information, see Attributes in the Amazon Elastic Container Service Developer Guide. */ export interface Attribute { + /** + *The name of the attribute. The name
must contain between 1 and 128
+ * characters and name may contain letters (uppercase and lowercase), numbers, hyphens,
+ * underscores, forward slashes, back slashes, or periods.
The value of the attribute. The value
must contain between 1 and 128
* characters and may contain letters (uppercase and lowercase), numbers, hyphens,
@@ -2513,12 +2606,6 @@ export interface Attribute {
*/
value?: string;
- /**
- *
The ID of the target. You can specify the short form ID for a resource or the full - * Amazon Resource Name (ARN).
- */ - targetId?: string; - /** *The type of the target with which to attach the attribute. This parameter is required * if you use the short form ID for a resource instead of the full ARN.
@@ -2526,11 +2613,10 @@ export interface Attribute { targetType?: TargetType | string; /** - *The name of the attribute. The name
must contain between 1 and 128
- * characters and name may contain letters (uppercase and lowercase), numbers, hyphens,
- * underscores, forward slashes, back slashes, or periods.
The ID of the target. You can specify the short form ID for a resource or the full + * Amazon Resource Name (ARN).
*/ - name: string | undefined; + targetId?: string; } export namespace Attribute { @@ -2540,6 +2626,12 @@ export namespace Attribute { } export interface DeleteAttributesRequest { + /** + *The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete + * attributes. If you do not specify a cluster, the default cluster is assumed.
+ */ + cluster?: string; + /** *The attributes to delete from your resource. You can specify up to 10 attributes per * request. For custom attributes, specify the attribute name and target ID, but do not @@ -2547,12 +2639,6 @@ export interface DeleteAttributesRequest { * specify the target type.
*/ attributes: Attribute[] | undefined; - - /** - *The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to delete - * attributes. If you do not specify a cluster, the default cluster is assumed.
- */ - cluster?: string; } export namespace DeleteAttributesRequest { @@ -2699,17 +2785,17 @@ export interface DeleteServiceRequest { */ cluster?: string; + /** + *The name of the service to delete.
+ */ + service: string | undefined; + /** *If true
, allows you to delete a service even if it has not been scaled
* down to zero tasks. It is only necessary to use this if the service is using the
* REPLICA
scheduling strategy.
The name of the service to delete.
- */ - service: string | undefined; } export namespace DeleteServiceRequest { @@ -2732,12 +2818,6 @@ export namespace DeleteServiceResponse { } export interface DeleteTaskSetRequest { - /** - *If true
, this allows you to delete a task set even if it hasn't been
- * scaled down to zero.
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task * set exists in to delete.
@@ -2754,6 +2834,12 @@ export interface DeleteTaskSetRequest { *The task set ID or full Amazon Resource Name (ARN) of the task set to delete.
*/ taskSet: string | undefined; + + /** + *If true
, this allows you to delete a task set even if it hasn't been
+ * scaled down to zero.
Describes the resources available for a container instance.
*/ export interface Resource { - /** - *When the doubleValue
type is set, the value of the resource must be a
- * double precision floating-point type.
The name of the resource, such as CPU
, MEMORY
,
* PORTS
, PORTS_UDP
, or a user-defined resource.
When the integerValue
type is set, the value of the resource must be an
- * integer.
The type of the resource, such as INTEGER
, DOUBLE
,
+ * LONG
, or STRINGSET
.
When the stringSetValue
type is set, the value of the resource must be a
- * string type.
When the doubleValue
type is set, the value of the resource must be a
+ * double precision floating-point type.
When the longValue
type is set, the value of the resource must be an
@@ -2863,10 +2943,16 @@ export interface Resource {
longValue?: number;
/**
- *
The type of the resource, such as INTEGER
, DOUBLE
,
- * LONG
, or STRINGSET
.
When the integerValue
type is set, the value of the resource must be an
+ * integer.
When the stringSetValue
type is set, the value of the resource must be a
+ * string type.
For CPU and memory resource types, this parameter describes the amount of each - * resource that was available on the container instance when the container agent - * registered it with Amazon ECS. This value represents the total amount of CPU and memory that - * can be allocated on this container instance to tasks. For port resource types, this - * parameter describes the ports that were reserved by the Amazon ECS container agent when it - * registered the container instance with Amazon ECS.
+ *The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance
namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID
.
The EC2 instance ID of the container instance.
+ */ + ec2InstanceId?: string; + + /** + *The capacity provider associated with the container instance.
+ */ + capacityProviderName?: string; + + /** + *The version counter for the container instance. Every time a container instance
+ * experiences a change that triggers a CloudWatch event, the version counter is
+ * incremented. If you are replicating your Amazon ECS container instance state with CloudWatch
+ * Events, you can compare the version of a container instance reported by the Amazon ECS APIs
+ * with the version reported in CloudWatch Events for the container instance (inside the
+ * detail
object) to verify that the version in your event stream is
+ * current.
The version information for the Amazon ECS container agent and Docker daemon running on the + * container instance.
+ */ + versionInfo?: VersionInfo; /** *For CPU and memory resource types, this parameter describes the remaining CPU and @@ -2930,42 +3038,14 @@ export interface ContainerInstance { remainingResources?: Resource[]; /** - *
The metadata that you apply to the container instance to help you categorize and - * organize them. Each tag consists of a key and an optional value, both of which you - * define.
- *The following basic restrictions apply to tags:
- *Maximum number of tags per resource - 50
- *For each resource, each tag key must be unique, and each tag key can have only - * one value.
- *Maximum key length - 128 Unicode characters in UTF-8
- *Maximum value length - 256 Unicode characters in UTF-8
- *If your tagging schema is used across multiple services and resources, - * remember that other services may have restrictions on allowed characters. - * Generally allowed characters are: letters, numbers, and spaces representable in - * UTF-8, and the following characters: + - = . _ : / @.
- *Tag keys and values are case-sensitive.
- *Do not use aws:
, AWS:
, or any upper or lowercase
- * combination of such as a prefix for either keys or values as it is reserved for
- * AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with
- * this prefix do not count against your tags per resource limit.
For CPU and memory resource types, this parameter describes the amount of each + * resource that was available on the container instance when the container agent + * registered it with Amazon ECS. This value represents the total amount of CPU and memory that + * can be allocated on this container instance to tasks. For port resource types, this + * parameter describes the ports that were reserved by the Amazon ECS container agent when it + * registered the container instance with Amazon ECS.
*/ - tags?: Tag[]; + registeredResources?: Resource[]; /** *The status of the container instance. The valid values are REGISTERING
,
@@ -2989,30 +3069,23 @@ export interface ContainerInstance {
status?: string;
/**
- *
The EC2 instance ID of the container instance.
+ *The reason that the container instance reached its current status.
*/ - ec2InstanceId?: string; + statusReason?: string; /** - *The capacity provider associated with the container instance.
- */ - capacityProviderName?: string; - - /** - *The version counter for the container instance. Every time a container instance
- * experiences a change that triggers a CloudWatch event, the version counter is
- * incremented. If you are replicating your Amazon ECS container instance state with CloudWatch
- * Events, you can compare the version of a container instance reported by the Amazon ECS APIs
- * with the version reported in CloudWatch Events for the container instance (inside the
- * detail
object) to verify that the version in your event stream is
- * current.
This parameter returns true
if the agent is connected to Amazon ECS.
+ * Registered instances with an agent that may be unhealthy or stopped return
+ * false
. Only instances connected to an agent can accept placement
+ * requests.
The Amazon Resource Name (ARN) of the container instance. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance
namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID
.
The number of tasks on the container instance that are in the RUNNING
+ * status.
The number of tasks on the container instance that are in the PENDING
@@ -3026,12 +3099,6 @@ export interface ContainerInstance {
*/
agentUpdateStatus?: AgentUpdateStatus | string;
- /**
- *
The resources attached to a container instance, such as elastic network - * interfaces.
- */ - attachments?: Attachment[]; - /** *The attributes set for the container instance, either by the Amazon ECS container agent at * instance registration or manually with the PutAttributes @@ -3039,35 +3106,54 @@ export interface ContainerInstance { */ attributes?: Attribute[]; - /** - *
The number of tasks on the container instance that are in the RUNNING
- * status.
The reason that the container instance reached its current status.
- */ - statusReason?: string; - /** *The Unix timestamp for when the container instance was registered.
*/ registeredAt?: Date; /** - *The version information for the Amazon ECS container agent and Docker daemon running on the - * container instance.
+ *The resources attached to a container instance, such as elastic network + * interfaces.
*/ - versionInfo?: VersionInfo; + attachments?: Attachment[]; /** - *This parameter returns true
if the agent is connected to Amazon ECS.
- * Registered instances with an agent that may be unhealthy or stopped return
- * false
. Only instances connected to an agent can accept placement
- * requests.
The metadata that you apply to the container instance to help you categorize and + * organize them. Each tag consists of a key and an optional value, both of which you + * define.
+ *The following basic restrictions apply to tags:
+ *Maximum number of tags per resource - 50
+ *For each resource, each tag key must be unique, and each tag key can have only + * one value.
+ *Maximum key length - 128 Unicode characters in UTF-8
+ *Maximum value length - 256 Unicode characters in UTF-8
+ *If your tagging schema is used across multiple services and resources, + * remember that other services may have restrictions on allowed characters. + * Generally allowed characters are: letters, numbers, and spaces representable in + * UTF-8, and the following characters: + - = . _ : / @.
+ *Tag keys and values are case-sensitive.
+ *Do not use aws:
, AWS:
, or any upper or lowercase
+ * combination of such as a prefix for either keys or values as it is reserved for
+ * AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with
+ * this prefix do not count against your tags per resource limit.
The name of a container.
+ */ + containerName: string | undefined; + /** *The dependency condition of the container. The following are the available conditions * and their behavior:
@@ -3169,11 +3260,6 @@ export interface ContainerDependency { *The name of a container.
- */ - containerName: string | undefined; } export namespace ContainerDependency { @@ -3228,14 +3314,14 @@ export namespace EnvironmentFile { */ export interface HostEntry { /** - *The IP address to use in the /etc/hosts
entry.
The hostname to use in the /etc/hosts
entry.
The hostname to use in the /etc/hosts
entry.
The IP address to use in the /etc/hosts
entry.
The number of times to retry a failed health check before the container is considered - * unhealthy. You may specify between 1 and 10 retries. The default value is 3.
+ *A string array representing the command that the container runs to determine if it is
+ * healthy. The string array must start with CMD
to execute the command
+ * arguments directly, or CMD-SHELL
to run the command with the container's
+ * default shell. For example:
+ * [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ]
+ *
An exit code of 0 indicates success, and non-zero exit code indicates failure. For
+ * more information, see HealthCheck
in the Create a container
+ * section of the Docker Remote API.
The time period in seconds between each health check execution. You may specify + * between 5 and 300 seconds. The default value is 30 seconds.
+ */ + interval?: number; /** *The time period in seconds to wait for a health check to succeed before it is @@ -3362,6 +3462,12 @@ export interface HealthCheck { */ timeout?: number; + /** + *
The number of times to retry a failed health check before the container is considered + * unhealthy. You may specify between 1 and 10 retries. The default value is 3.
+ */ + retries?: number; + /** *The optional grace period within which to provide containers time to bootstrap before * failed health checks count towards the maximum number of retries. You may specify @@ -3373,26 +3479,6 @@ export interface HealthCheck { * */ startPeriod?: number; - - /** - *
The time period in seconds between each health check execution. You may specify - * between 5 and 300 seconds. The default value is 30 seconds.
- */ - interval?: number; - - /** - *A string array representing the command that the container runs to determine if it is
- * healthy. The string array must start with CMD
to execute the command
- * arguments directly, or CMD-SHELL
to run the command with the container's
- * default shell. For example:
- * [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ]
- *
An exit code of 0 indicates success, and non-zero exit code indicates failure. For
- * more information, see HealthCheck
in the Create a container
- * section of the Docker Remote API.
An object representing a container instance host device.
*/ export interface Device { - /** - *The explicit permissions to provide to the container for the device. By default, the
- * container has permissions for read
, write
, and
- * mknod
for the device.
The path for the device on the host container instance.
*/ @@ -3482,6 +3561,13 @@ export interface Device { *The path inside the container at which to expose the host device.
*/ containerPath?: string; + + /** + *The explicit permissions to provide to the container for the device. By default, the
+ * container has permissions for read
, write
, and
+ * mknod
for the device.
The container path, mount options, and size of the tmpfs mount.
*/ export interface Tmpfs { + /** + *The absolute file path where the tmpfs volume is to be mounted.
+ */ + containerPath: string | undefined; + + /** + *The maximum size (in MiB) of the tmpfs volume.
+ */ + size: number | undefined; + /** *The list of tmpfs volume mount options.
*Valid values: "defaults" | "ro" | "rw" | "suid" | "nosuid" | "dev" | "nodev" |
@@ -3505,16 +3601,6 @@ export interface Tmpfs {
*
The maximum size (in MiB) of the tmpfs volume.
- */ - size: number | undefined; - - /** - *The absolute file path where the tmpfs volume is to be mounted.
- */ - containerPath: string | undefined; } export namespace Tmpfs { @@ -3527,16 +3613,6 @@ export namespace Tmpfs { *Linux-specific options that are applied to the container, such as Linux KernelCapabilities.
*/ export interface LinuxParameters { - /** - *The container path, mount options, and size (in MiB) of the tmpfs mount. This
- * parameter maps to the --tmpfs
option to docker run.
If you are using tasks that use the Fargate launch type, the
- * tmpfs
parameter is not supported.
The Linux capabilities for the container that are added to or dropped from the default * configuration provided by Docker.
@@ -3549,6 +3625,17 @@ export interface LinuxParameters { */ capabilities?: KernelCapabilities; + /** + *Any host devices to expose to the container. This parameter maps to
+ * Devices
in the Create a container section of the
+ * Docker Remote API and the --device
option to docker run.
If you are using tasks that use the Fargate launch type, the
+ * devices
parameter is not supported.
Run an init
process inside the container that forwards signals and reaps
* processes. This parameter maps to the --init
option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
@@ -3567,6 +3654,16 @@ export interface LinuxParameters {
*/
sharedMemorySize?: number;
+ /**
+ *
The container path, mount options, and size (in MiB) of the tmpfs mount. This
+ * parameter maps to the --tmpfs
option to docker run.
If you are using tasks that use the Fargate launch type, the
+ * tmpfs
parameter is not supported.
The total amount of swap memory (in MiB) a container can use. This parameter will be
* translated to the --memory-swap
option to docker run where the value would be the sum of
@@ -3598,17 +3695,6 @@ export interface LinuxParameters {
*
*/
swappiness?: number;
-
- /**
- *
Any host devices to expose to the container. This parameter maps to
- * Devices
in the Create a container section of the
- * Docker Remote API and the --device
option to docker run.
If you are using tasks that use the Fargate launch type, the
- * devices
parameter is not supported.
If this value is true
, the container has read-only access to the volume.
- * If this value is false
, then the container can write to the volume. The
- * default value is false
.
The name of the volume to mount. Must be a volume name referenced in the
+ * name
parameter of task definition volume
.
The path on the container to mount the host volume at.
@@ -3768,10 +3853,11 @@ export interface MountPoint { containerPath?: string; /** - *The name of the volume to mount. Must be a volume name referenced in the
- * name
parameter of task definition volume
.
If this value is true
, the container has read-only access to the volume.
+ * If this value is false
, then the container can write to the volume. The
+ * default value is false
.
The protocol used for the port mapping. Valid values are tcp
and
- * udp
. The default is tcp
.
The port number on the container that is bound to the user-specified or automatically + * assigned host port.
+ *If you are using containers in a task with the awsvpc
or
+ * host
network mode, exposed ports should be specified using
+ * containerPort
.
If you are using containers in a task with the bridge
network mode and
+ * you specify a container port and not a host port, your container automatically receives
+ * a host port in the ephemeral port range. For more information, see
+ * hostPort
. Port mappings that are automatically assigned in this way do not
+ * count toward the 100 reserved ports limit of a container instance.
The port number on the container instance to reserve for your container.
@@ -3837,18 +3931,10 @@ export interface PortMapping { hostPort?: number; /** - *The port number on the container that is bound to the user-specified or automatically - * assigned host port.
- *If you are using containers in a task with the awsvpc
or
- * host
network mode, exposed ports should be specified using
- * containerPort
.
If you are using containers in a task with the bridge
network mode and
- * you specify a container port and not a host port, your container automatically receives
- * a host port in the ephemeral port range. For more information, see
- * hostPort
. Port mappings that are automatically assigned in this way do not
- * count toward the 100 reserved ports limit of a container instance.
The protocol used for the port mapping. Valid values are tcp
and
+ * udp
. The default is tcp
.
The hard limit for the ulimit type.
+ *The soft limit for the ulimit type.
*/ - hardLimit: number | undefined; + softLimit: number | undefined; /** - *The soft limit for the ulimit type.
+ *The hard limit for the ulimit type.
*/ - softLimit: number | undefined; + hardLimit: number | undefined; } export namespace Ulimit { @@ -4009,18 +4095,18 @@ export namespace Ulimit { *Details on a data volume from another container in the same task definition.
*/ export interface VolumeFrom { + /** + *The name of another container within the same task definition from which to mount + * volumes.
+ */ + sourceContainer?: string; + /** *If this value is true
, the container has read-only access to the volume.
* If this value is false
, then the container can write to the volume. The
* default value is false
.
The name of another container within the same task definition from which to mount - * volumes.
- */ - sourceContainer?: string; } export namespace VolumeFrom { @@ -4035,73 +4121,113 @@ export namespace VolumeFrom { */ export interface ContainerDefinition { /** - *When this parameter is true, networking is disabled within the container. This
- * parameter maps to NetworkDisabled
in the Create a container
- * section of the Docker Remote API.
This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.
- *The name of a container. If you are linking multiple containers together in a task
+ * definition, the name
of one container can be entered in the
+ * links
of another container to connect the containers.
+ * Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. This parameter maps to name
in the
+ * Create a container section of the Docker Remote API and the
+ * --name
option to docker
+ * run.
A list of hostnames and IP address mappings to append to the /etc/hosts
- * file on the container. This parameter maps to ExtraHosts
in the
+ *
The image used to start a container. This string is passed directly to the Docker
+ * daemon. Images in the Docker Hub registry are available by default. Other repositories
+ * are specified with either
+ * repository-url/image:tag
+ *
or
+ * repository-url/image@digest
+ *
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the
* Create a container section of the Docker Remote API and the
- * --add-host
option to docker
+ * IMAGE
parameter of docker
* run.
This parameter is not supported for Windows containers or tasks that use the
- * awsvpc
network mode.
A list of ulimits
to set in the container. If a ulimit value is specified
- * in a task definition, it will override the default values set by Docker. This parameter
- * maps to Ulimits
in the Create a container section of the
- * Docker Remote API and the --ulimit
option to docker run. Valid naming values are displayed
- * in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
- *
This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.
- *When this parameter is true
, a TTY is allocated. This parameter maps to
- * Tty
in the Create a container section of the
- * Docker Remote API and the --tty
option to docker run.
Data volumes to mount from another container. This parameter maps to
- * VolumesFrom
in the Create a container section of the
- * Docker Remote API and the --volumes-from
option to docker run.
When a new task starts, the Amazon ECS container agent pulls the latest version of + * the specified image and tag for the container to use. However, subsequent + * updates to a repository image are not propagated to already running + * tasks.
+ *Images in Amazon ECR repositories can be specified by either using the full
+ * registry/repository:tag
or
+ * registry/repository@digest
. For example,
+ * 012345678910.dkr.ecr.
+ * or
+ * 012345678910.dkr.ecr.
.
+ *
Images in official repositories on Docker Hub use a single name (for example,
+ * ubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization
+ * name (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name
+ * (for example, quay.io/assemblyline/ubuntu
).
The container health check command and associated configuration parameters for the
- * container. This parameter maps to HealthCheck
in the
- * Create a container section of the Docker Remote API and the
- * HEALTHCHECK
parameter of docker
- * run.
The private repository authentication credentials to use.
*/ - healthCheck?: HealthCheck; + repositoryCredentials?: RepositoryCredentials; /** - *A list of DNS search domains that are presented to the container. This parameter maps
- * to DnsSearch
in the Create a container section of the
- * Docker Remote API and the --dns-search
option to docker run.
This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.
- *The number of cpu
units reserved for the container. This parameter maps
+ * to CpuShares
in the Create a container section of the
+ * Docker Remote API and the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the
+ * only requirement is that the total amount of CPU reserved for all containers within a
+ * task be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type + * by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page + * by 1,024.
+ *Linux containers share unallocated CPU units with other containers on the container + * instance with the same ratio as their allocated amount. For example, if you run a + * single-container task on a single-core instance type with 512 CPU units specified for + * that container, and that is the only task running on the container instance, that + * container could use the full 1,024 CPU unit share at any given time. However, if you + * launched another copy of the same task on that container instance, each task would be + * guaranteed a minimum of 512 CPU units when needed, and each container could float to + * higher CPU usage if the other container was not using it, but if both tasks were 100% + * active all of the time, they would be limited to 512 CPU units.
+ *On Linux container instances, the Docker daemon on the container instance uses the CPU + * value to calculate the relative CPU share ratios for running containers. For more + * information, see CPU share + * constraint in the Docker documentation. The minimum valid CPU share value + * that the Linux kernel allows is 2. However, the CPU parameter is not required, and you + * can use CPU values below 2 in your container definitions. For CPU values below 2 + * (including null), the behavior varies based on your Amazon ECS container agent + * version:
+ *+ * Agent versions less than or equal to 1.1.0: + * Null and zero CPU values are passed to Docker as 0, which Docker then converts + * to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux + * kernel converts to two CPU shares.
+ *+ * Agent versions greater than or equal to 1.2.0: + * Null, zero, and CPU values of 1 are passed to Docker as 2.
+ *On Windows container instances, the CPU limit is enforced as an absolute limit, or a
+ * quota. Windows containers only have access to the specified amount of CPU that is
+ * described in the task definition. A null or zero CPU value is passed to Docker as
+ * 0
, which Windows interprets as 1% of one CPU.
The amount (in MiB) of memory to present to the container. If your container attempts @@ -4124,49 +4250,94 @@ export interface ContainerDefinition { memory?: number; /** - *
The user name to use inside the container. This parameter maps to The soft limit (in MiB) of memory to reserve for the container. When system memory is
+ * under heavy contention, Docker attempts to keep the container memory to this soft limit.
+ * However, your container can consume more memory when it needs to, up to either the hard
+ * limit specified with the If a task-level memory value is not specified, you must specify a non-zero integer for
+ * one or both of For example, if your container normally uses 128 MiB of memory, but occasionally
+ * bursts to 256 MiB of memory for short periods of time, you can set a
+ * The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should
+ * not specify fewer than 4 MiB of memory for your containers. The You can use the following formats. If specifying a UID or GID, you must specify it as
- * a positive integer.
- *
- *
- *
- *
- *
- * This parameter is not supported for Windows containers or tasks that use the awsvpc network mode. Containers that are collocated on a single container instance may be able to
+ * communicate with each other without requiring links or host port mappings. Network
+ * isolation is achieved on the container instance using security groups and VPC
+ * settings. The list of port mappings for the container. Port mappings allow containers to access
+ * ports on the host container instance to send or receive traffic. For task definitions that use the Port mappings on Windows use the This parameter maps to After a task reaches the If the All tasks must have at least one essential container. If you have an application that
+ * is composed of multiple containers, you should group containers that are used for a
+ * common purpose into components, and separate the different components into multiple task
+ * definitions. For more information, see Application
+ * Architecture in the Amazon Elastic Container Service Developer Guide. When this parameter is The command that is passed to the container. This parameter maps to The log configuration specification for the container. This parameter maps to Amazon ECS currently supports a subset of the logging drivers available to the Docker
- * daemon (shown in the LogConfiguration data type). Additional log
- * drivers may be available in future releases of the Amazon ECS container agent. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: The environment variables to pass to a container. This parameter maps to
+ * We do not recommend using plaintext environment variables for sensitive
+ * information, such as credential data. A list of files containing the environment variables to pass to a container. This
+ * parameter maps to the You can specify up to ten environment files. The file must have a If there are environment variables specified using the This field is not valid for containers in tasks using the Fargate launch
+ * type. The mount points for data volumes in your container. This parameter maps to Windows containers can mount whole directories on the same drive as
+ * Data volumes to mount from another container. This parameter maps to
+ * Linux-specific modifications that are applied to the container, such as Linux kernel
+ * capabilities. For more information see KernelCapabilities. The Amazon ECS container agent running on a container instance must register the
- * logging drivers available on that instance with the
- * This parameter is not supported for Windows containers. The This parameter is not supported for Windows containers or tasks that use the awsvpc network mode. Containers that are collocated on a single container instance may be able to
- * communicate with each other without requiring links or host port mappings. Network
- * isolation is achieved on the container instance using security groups and VPC
- * settings. The secrets to pass to the container. For more information, see Specifying
+ * Sensitive Data in the Amazon Elastic Container Service Developer Guide. The dependencies defined for container startup and shutdown. A container can contain
+ * multiple dependencies. When a dependency is defined for container startup, for container
+ * shutdown it is reversed. For tasks using the EC2 launch type, the container instances require at
+ * least version 1.26.0 of the container agent to enable container dependencies. However,
+ * we recommend using the latest container agent version. For information about checking
+ * your agent version and updating to the latest version, see Updating the Amazon ECS
+ * Container Agent in the Amazon Elastic Container Service Developer Guide. If you are
+ * using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
+ * For tasks using the Fargate launch type, the task or service requires
+ * platform version Time duration (in seconds) to wait before giving up on resolving dependencies for a
@@ -4270,6 +4469,30 @@ export interface ContainerDefinition {
*/
startTimeout?: number;
+ /**
+ * Time duration (in seconds) to wait before the container is forcefully killed if it
+ * doesn't exit normally on its own. For tasks using the Fargate launch type, the task or service requires
+ * platform version 1.3.0 or later. The max stop timeout value is 120 seconds and if the
+ * parameter is not specified, the default value of 30 seconds is used. For tasks using the EC2 launch type, if the The hostname to use for your container. This parameter maps to The type and amount of a resource to assign to a container. The only supported
- * resource is a GPU. The FireLens configuration for the container. This is used to specify and configure a
- * log router for container logs. For more information, see Custom Log Routing
- * in the Amazon Elastic Container Service Developer Guide. The image used to start a container. This string is passed directly to the Docker
- * daemon. Images in the Docker Hub registry are available by default. Other repositories
- * are specified with either
- * The user to use inside the container. This parameter maps to User in
- * the Create a container section of the Docker Remote API and the
- * --user option to docker run.
+ * memory parameter (if applicable), or all of the
+ * available memory on the container instance, whichever comes first. This parameter maps
+ * to MemoryReservation in the Create a container section of
+ * the Docker Remote API and the --memory-reservation option to docker run.
+ * If a task-level memory value is not specified, you must specify a non-zero integer for
+ * one or both of memory or memoryReservation in a container
+ * definition. If you specify both, memory must be greater than
+ * memoryReservation. If you specify memoryReservation, then
+ * that value is subtracted from the available memory resources for the container instance
+ * on which the container is placed. Otherwise, the value of memory is used.
+ * For example, if your container normally uses 128 MiB of memory, but occasionally
+ * bursts to 256 MiB of memory for short periods of time, you can set a
+ * memoryReservation of 128 MiB, and a memory hard limit of
+ * 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory
+ * from the remaining resources on the container instance, but also allow the container to
+ * consume more memory resources when needed.
+ * The links parameter allows containers to communicate with each other
+ * without the need for port mappings. This parameter is only supported if the network mode
+ * of a task definition is bridge. The name:internalName
+ * construct is analogous to name:alias in Docker links.
+ * Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. For more information about linking Docker containers, go to
+ * Legacy container links
+ * in the Docker documentation. This parameter maps to Links in the
+ * Create a container section of the Docker Remote API and the
+ * --link option to docker
 * run.
- * user
- * user:group
- * uid
- * uid:gid
- * user:gid
- * uid:group
+ * For task definitions that use the awsvpc network mode, you should only
+ * specify the containerPort. The hostPort can be left blank or
+ * it must be the same value as the containerPort.
+ * Port mappings on Windows use the NetNAT gateway address rather than
+ * localhost. There is no loopback for port mappings on Windows, so you
+ * cannot access a container's mapped port from the host itself.
+ * This parameter maps to PortBindings in the
+ * Create a container section of the Docker Remote API and the
+ * --publish option to docker
+ * run. If the network mode of a task definition is set to none,
+ * then you can't specify port mappings. If the network mode of a task definition is set to
+ * host, then host ports must either be undefined or they must match the
+ * container port in the port mapping.
+ * After a task reaches the RUNNING status, manual and automatic host
+ * and container port assignments are visible in the Network
+ * Bindings section of a container description for a selected task in
+ * the Amazon ECS console. The assignments are also visible in the
+ * networkBindings section DescribeTasks
+ * responses.
+ * If the essential parameter of a container is marked as true,
+ * and that container fails or stops for any reason, all other containers that are part of
+ * the task are stopped. If the essential parameter of a container is marked
+ * as false, then its failure does not affect the rest of the containers in a
+ * task. If this parameter is omitted, a container is assumed to be essential.
- * When this parameter is true, this allows you to deploy containerized
- * applications that require stdin or a tty to be allocated. This
- * parameter maps to OpenStdin in the Create a container
- * section of the Docker Remote API and the --interactive option to docker run.
+ * The command that is passed to the container. This parameter maps to Cmd
+ * in the Create a container section of the Docker Remote API and the
+ * COMMAND parameter to docker
+ * run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each
+ * argument should be a separated string in the array.
- * This parameter maps to LogConfig in the
- * Create a container section of the Docker Remote API and the
- * --log-driver option to docker
- * run. By default, containers use the same logging driver that the Docker
- * daemon uses. However the container may use a different logging driver than the Docker
- * daemon by specifying a log driver with this parameter in the container definition. To
- * use a different logging driver for a container, the log system must be configured
- * properly on the container instance (or on a different log server for remote logging
- * options). For more information on the options for different supported log drivers, see
- * Configure
- * logging drivers in the Docker documentation.
- * This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
+ * Env in the Create a container section of the
+ * Docker Remote API and the --env option to docker run.
+ * A list of files containing the environment variables to pass to a container. This
+ * parameter maps to the --env-file option to docker run.
+ * You can specify up to ten environment files. The file must have a .env
+ * file extension. Each line in an environment file should contain an environment variable
+ * in VARIABLE=VALUE format. Lines beginning with # are treated
+ * as comments and are ignored. For more information on the environment variable file
+ * syntax, see Declare default
+ * environment variables in file.
+ * If there are environment variables specified using the environment
+ * parameter in a container definition, they take precedence over the variables contained
+ * within an environment file. If multiple environment files are specified that contain the
+ * same variable, they are processed from the top down. It is recommended to use unique
+ * variable names. For more information, see Specifying Environment
+ * Variables in the Amazon Elastic Container Service Developer Guide.
+ * This parameter maps to Volumes in the Create a container
+ * section of the Docker Remote API and the --volume option to docker run.
+ * Windows containers can mount whole directories on the same drive as
+ * $env:ProgramData. Windows containers cannot mount directories on a
+ * different drive, and mount point cannot be across drives.
+ * VolumesFrom in the Create a container section of the
+ * Docker Remote API and the --volumes-from option to docker run.
- * The Amazon ECS container agent running on a container instance must register the
- * logging drivers available on that instance with the
- * ECS_AVAILABLE_LOGGING_DRIVERS environment variable before
- * containers placed on that instance can use these log configuration options. For more
- * information, see Amazon ECS Container
- * Agent Configuration in the Amazon Elastic Container Service Developer Guide.
- * The links parameter allows containers to communicate with each other
- * without the need for port mappings. This parameter is only supported if the network mode
- * of a task definition is bridge. The name:internalName
- * construct is analogous to name:alias in Docker links.
- * Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. For more information about linking Docker containers, go to
- * Legacy container links
- * in the Docker documentation. This parameter maps to Links in the
- * Create a container section of the Docker Remote API and the
- * --link option to docker
- * run.
+ * ecs-init package. If your container instances are launched from version
+ * 20190301 or later, then they contain the required versions of the
+ * container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
+ * For tasks using the Fargate launch type, the task or service requires
+ * platform version 1.3.0 or later.
+ * For tasks using the EC2 launch type, if the stopTimeout
+ * parameter is not specified, the value set for the Amazon ECS container agent configuration
+ * variable ECS_CONTAINER_STOP_TIMEOUT is used by default. If neither the
+ * stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT
+ * agent configuration variable are set, then the default values of 30 seconds for Linux
+ * containers and 30 seconds on Windows containers are used. Your container instances
+ * require at least version 1.26.0 of the container agent to enable a container stop
+ * timeout value. However, we recommend using the latest container agent version. For
+ * information about checking your agent version and updating to the latest version, see
+ * Updating the Amazon ECS
+ * Container Agent in the Amazon Elastic Container Service Developer Guide. If you are
+ * using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
+ * ecs-init package. If your container instances are launched from version
+ * 20190301 or later, then they contain the required versions of the
+ * container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
 * This parameter maps to Hostname
 * in the Create a container section of the Docker Remote API and the
@@ -4283,236 +4506,94 @@ export interface ContainerDefinition {
hostname?: string;
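As a rough sketch of how the memory, memoryReservation, essential, and portMappings fields described in the doc comments above fit together (the image name and port values are illustrative only, and the object simply mirrors the ContainerDefinition shape defined in this file):

```ts
// Illustrative only: a container that reserves 128 MiB (soft limit) but may burst
// up to a 300 MiB hard limit, mirroring the memoryReservation example above.
const webContainer = {
  name: "web",
  image: "nginx:latest",   // hypothetical image
  essential: true,
  memoryReservation: 128,  // soft limit, in MiB
  memory: 300,             // hard limit, in MiB; must be greater than memoryReservation
  portMappings: [
    // With the awsvpc network mode, specify only containerPort; hostPort must be
    // omitted or equal to containerPort.
    { containerPort: 80, protocol: "tcp" },
  ],
};

console.log(JSON.stringify(webContainer, null, 2));
```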
/**
- *
- * repository-url/image:tag
- *
or
- * repository-url/image@digest
- *
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the
+ * User
in the
* Create a container section of the Docker Remote API and the
- * IMAGE
parameter of docker
- * run.--user
option to docker
+ * run.
When running tasks using the host
network mode, you should not run
+ * containers using the root user (UID 0). It is considered best practice to use a
+ * non-root user.
You can specify the user
using the following formats. If specifying a UID
+ * or GID, you must specify it as a positive integer.
When a new task starts, the Amazon ECS container agent pulls the latest version of - * the specified image and tag for the container to use. However, subsequent - * updates to a repository image are not propagated to already running - * tasks.
- *Images in Amazon ECR repositories can be specified by either using the full
- * registry/repository:tag
or
- * registry/repository@digest
. For example,
- * 012345678910.dkr.ecr.
- * or
- * 012345678910.dkr.ecr.
.
- *
+ * user
+ *
Images in official repositories on Docker Hub use a single name (for example,
- * ubuntu
or mongo
).
+ * user:group
+ *
Images in other repositories on Docker Hub are qualified with an organization
- * name (for example, amazon/amazon-ecs-agent
).
+ * uid
+ *
Images in other online repositories are qualified further by a domain name
- * (for example, quay.io/assemblyline/ubuntu
).
+ * uid:gid
+ *
The soft limit (in MiB) of memory to reserve for the container. When system memory is
- * under heavy contention, Docker attempts to keep the container memory to this soft limit.
- * However, your container can consume more memory when it needs to, up to either the hard
- * limit specified with the memory
parameter (if applicable), or all of the
- * available memory on the container instance, whichever comes first. This parameter maps
- * to MemoryReservation
in the Create a container section of
- * the Docker Remote API and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for
- * one or both of memory
or memoryReservation
in a container
- * definition. If you specify both, memory
must be greater than
- * memoryReservation
. If you specify memoryReservation
, then
- * that value is subtracted from the available memory resources for the container instance
- * on which the container is placed. Otherwise, the value of memory
is
- * used.
For example, if your container normally uses 128 MiB of memory, but occasionally
- * bursts to 256 MiB of memory for short periods of time, you can set a
- * memoryReservation
of 128 MiB, and a memory
hard limit of
- * 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory
- * from the remaining resources on the container instance, but also allow the container to
- * consume more memory resources when needed.
The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should - * not specify fewer than 4 MiB of memory for your containers.
-   */
-  memoryReservation?: number;
-
-  /**
-   * The list of port mappings for the container. Port mappings allow containers to access
-   * ports on the host container instance to send or receive traffic.
- *For task definitions that use the awsvpc
network mode, you should only
- * specify the containerPort
. The hostPort
can be left blank or
- * it must be the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than
- * localhost
. There is no loopback for port mappings on Windows, so you
- * cannot access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the
- * Create a container section of the Docker Remote API and the
- * --publish
option to docker
- * run. If the network mode of a task definition is set to none
,
- * then you can't specify port mappings. If the network mode of a task definition is set to
- * host
, then host ports must either be undefined or they must match the
- * container port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host
- * and container port assignments are visible in the Network
- * Bindings section of a container description for a selected task in
- * the Amazon ECS console. The assignments are also visible in the
- * networkBindings
section DescribeTasks
- * responses.
The dependencies defined for container startup and shutdown. A container can contain - * multiple dependencies. When a dependency is defined for container startup, for container - * shutdown it is reversed.
- *For tasks using the EC2 launch type, the container instances require at
- * least version 1.26.0 of the container agent to enable container dependencies. However,
- * we recommend using the latest container agent version. For information about checking
- * your agent version and updating to the latest version, see Updating the Amazon ECS
- * Container Agent in the Amazon Elastic Container Service Developer Guide. If you are
- * using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
- * ecs-init
package. If your container instances are launched from version
- * 20190301
or later, then they contain the required versions of the
- * container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
For tasks using the Fargate launch type, the task or service requires
- * platform version 1.3.0
or later.
The number of cpu
units reserved for the container. This parameter maps
- * to CpuShares
in the Create a container section of the
- * Docker Remote API and the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the
- * only requirement is that the total amount of CPU reserved for all containers within a
- * task be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type - * by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page - * by 1,024.
- *Linux containers share unallocated CPU units with other containers on the container - * instance with the same ratio as their allocated amount. For example, if you run a - * single-container task on a single-core instance type with 512 CPU units specified for - * that container, and that is the only task running on the container instance, that - * container could use the full 1,024 CPU unit share at any given time. However, if you - * launched another copy of the same task on that container instance, each task would be - * guaranteed a minimum of 512 CPU units when needed, and each container could float to - * higher CPU usage if the other container was not using it, but if both tasks were 100% - * active all of the time, they would be limited to 512 CPU units.
- *On Linux container instances, the Docker daemon on the container instance uses the CPU - * value to calculate the relative CPU share ratios for running containers. For more - * information, see CPU share - * constraint in the Docker documentation. The minimum valid CPU share value - * that the Linux kernel allows is 2. However, the CPU parameter is not required, and you - * can use CPU values below 2 in your container definitions. For CPU values below 2 - * (including null), the behavior varies based on your Amazon ECS container agent - * version:
- *- * Agent versions less than or equal to 1.1.0: - * Null and zero CPU values are passed to Docker as 0, which Docker then converts - * to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux - * kernel converts to two CPU shares.
+ *user:gid
+ *
* - * Agent versions greater than or equal to 1.2.0: - * Null, zero, and CPU values of 1 are passed to Docker as 2.
+ *uid:group
+ *
* On Windows container instances, the CPU limit is enforced as an absolute limit, or a
- * quota. Windows containers only have access to the specified amount of CPU that is
- * described in the task definition. A null or zero CPU value is passed to Docker as
- * 0
, which Windows interprets as 1% of one CPU.
This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.
+ *Time duration (in seconds) to wait before the container is forcefully killed if it - * doesn't exit normally on its own.
- *For tasks using the Fargate launch type, the task or service requires - * platform version 1.3.0 or later. The max stop timeout value is 120 seconds and if the - * parameter is not specified, the default value of 30 seconds is used.
- *For tasks using the EC2 launch type, if the stopTimeout
- * parameter is not specified, the value set for the Amazon ECS container agent configuration
- * variable ECS_CONTAINER_STOP_TIMEOUT
is used by default. If neither the
- * stopTimeout
parameter or the ECS_CONTAINER_STOP_TIMEOUT
- * agent configuration variable are set, then the default values of 30 seconds for Linux
- * containers and 30 seconds on Windows containers are used. Your container instances
- * require at least version 1.26.0 of the container agent to enable a container stop
- * timeout value. However, we recommend using the latest container agent version. For
- * information about checking your agent version and updating to the latest version, see
- * Updating the Amazon ECS
- * Container Agent in the Amazon Elastic Container Service Developer Guide. If you are
- * using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
- * ecs-init
package. If your container instances are launched from version
- * 20190301
or later, then they contain the required versions of the
- * container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The working directory in which to run commands inside the container. This parameter
+ * maps to WorkingDir
in the Create a container section of the
+ * Docker Remote API and the --workdir
option to docker run.
A key/value map of labels to add to the container. This parameter maps to
- * Labels
in the Create a container section of the
- * Docker Remote API and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
- *
When this parameter is true, networking is disabled within the container. This
+ * parameter maps to NetworkDisabled
in the Create a container
+ * section of the Docker Remote API.
This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.
+ *If the essential
parameter of a container is marked as true
,
- * and that container fails or stops for any reason, all other containers that are part of
- * the task are stopped. If the essential
parameter of a container is marked
- * as false
, then its failure does not affect the rest of the containers in a
- * task. If this parameter is omitted, a container is assumed to be essential.
All tasks must have at least one essential container. If you have an application that - * is composed of multiple containers, you should group containers that are used for a - * common purpose into components, and separate the different components into multiple task - * definitions. For more information, see Application - * Architecture in the Amazon Elastic Container Service Developer Guide.
+ *When this parameter is true, the container is given elevated privileges on the host
+ * container instance (similar to the root
user). This parameter maps to
+ * Privileged
in the Create a container section of the
+ * Docker Remote API and the --privileged
option to docker run.
This parameter is not supported for Windows containers or tasks using the Fargate launch type.
+ *Linux-specific modifications that are applied to the container, such as Linux kernel - * capabilities. For more information see KernelCapabilities.
- *This parameter is not supported for Windows containers.
- *When this parameter is true, the container is given read-only access to its root file
+ * system. This parameter maps to ReadonlyRootfs
in the
+ * Create a container section of the Docker Remote API and the
+ * --read-only
option to docker
+ * run.
This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.
+ *A list of DNS servers that are presented to the container. This parameter maps to @@ -4525,46 +4606,27 @@ export interface ContainerDefinition { dnsServers?: string[]; /** - *
The private repository authentication credentials to use.
+ *A list of DNS search domains that are presented to the container. This parameter maps
+ * to DnsSearch
in the Create a container section of the
+ * Docker Remote API and the --dns-search
option to docker run.
This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.
+ *A list of namespaced kernel parameters to set in the container. This parameter maps to
- * Sysctls
in the Create a container section of the
- * Docker Remote API and the --sysctl
option to docker run.
A list of hostnames and IP address mappings to append to the /etc/hosts
+ * file on the container. This parameter maps to ExtraHosts
in the
+ * Create a container section of the Docker Remote API and the
+ * --add-host
option to docker
+ * run.
It is not recommended that you specify network-related systemControls
- * parameters for multiple containers in a single task that also uses either the
- * awsvpc
or host
network modes. For tasks that use the
- * awsvpc
network mode, the container that is started last determines
- * which systemControls
parameters take effect. For tasks that use the
- * host
network mode, it changes the container instance's namespaced
- * kernel parameters as well as the containers.
This parameter is not supported for Windows containers or tasks that use the
+ * awsvpc
network mode.
The mount points for data volumes in your container.
- *This parameter maps to Volumes
in the Create a container
- * section of the Docker Remote API and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as
- * $env:ProgramData
. Windows containers cannot mount directories on a
- * different drive, and mount point cannot be across drives.
The name of a container. If you are linking multiple containers together in a task
- * definition, the name
of one container can be entered in the
- * links
of another container to connect the containers.
- * Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. This parameter maps to name
in the
- * Create a container section of the Docker Remote API and the
- * --name
option to docker
- * run.
A list of strings to provide custom labels for SELinux and AppArmor multi-level @@ -4593,80 +4655,109 @@ export interface ContainerDefinition { dockerSecurityOptions?: string[]; /** - *
A list of files containing the environment variables to pass to a container. This
- * parameter maps to the --env-file
option to docker run.
You can specify up to ten environment files. The file must have a .env
- * file extension. Each line in an environment file should contain an environment variable
- * in VARIABLE=VALUE
format. Lines beginning with #
are treated
- * as comments and are ignored. For more information on the environment variable file
- * syntax, see Declare default
- * environment variables in file.
If there are environment variables specified using the environment
- * parameter in a container definition, they take precedence over the variables contained
- * within an environment file. If multiple environment files are specified that contain the
- * same variable, they are processed from the top down. It is recommended to use unique
- * variable names. For more information, see Specifying Environment
- * Variables in the Amazon Elastic Container Service Developer Guide.
This field is not valid for containers in tasks using the Fargate launch - * type.
+ *When this parameter is true
, this allows you to deploy containerized
+ * applications that require stdin
or a tty
to be allocated. This
+ * parameter maps to OpenStdin
in the Create a container
+ * section of the Docker Remote API and the --interactive
option to docker run.
The environment variables to pass to a container. This parameter maps to
- * Env
in the Create a container section of the
- * Docker Remote API and the --env
option to docker run.
We do not recommend using plaintext environment variables for sensitive - * information, such as credential data.
- *When this parameter is true
, a TTY is allocated. This parameter maps to
+ * Tty
in the Create a container section of the
+ * Docker Remote API and the --tty
option to docker run.
When this parameter is true, the container is given read-only access to its root file
- * system. This parameter maps to ReadonlyRootfs
in the
- * Create a container section of the Docker Remote API and the
- * --read-only
option to docker
- * run.
A key/value map of labels to add to the container. This parameter maps to
+ * Labels
in the Create a container section of the
+ * Docker Remote API and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
+ *
A list of ulimits
to set in the container. If a ulimit value is specified
+ * in a task definition, it will override the default values set by Docker. This parameter
+ * maps to Ulimits
in the Create a container section of the
+ * Docker Remote API and the --ulimit
option to docker run. Valid naming values are displayed
+ * in the Ulimit data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
+ *
This parameter is not supported for Windows containers or tasks that use the awsvpc network mode.
*The working directory in which to run commands inside the container. This parameter
- * maps to WorkingDir
in the Create a container section of the
- * Docker Remote API and the --workdir
option to docker run.
The log configuration specification for the container.
+ *This parameter maps to LogConfig
in the
+ * Create a container section of the Docker Remote API and the
+ * --log-driver
option to docker
+ * run. By default, containers use the same logging driver that the Docker
+ * daemon uses. However the container may use a different logging driver than the Docker
+ * daemon by specifying a log driver with this parameter in the container definition. To
+ * use a different logging driver for a container, the log system must be configured
+ * properly on the container instance (or on a different log server for remote logging
+ * options). For more information on the options for different supported log drivers, see
+ * Configure
+ * logging drivers in the Docker documentation.
Amazon ECS currently supports a subset of the logging drivers available to the Docker + * daemon (shown in the LogConfiguration data type). Additional log + * drivers may be available in future releases of the Amazon ECS container agent.
+ *This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
+ *
The Amazon ECS container agent running on a container instance must register the
+ * logging drivers available on that instance with the
+ * ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before
+ * containers placed on that instance can use these log configuration options. For more
+ * information, see Amazon ECS Container
+ * Agent Configuration in the Amazon Elastic Container Service Developer Guide.
When this parameter is true, the container is given elevated privileges on the host
- * container instance (similar to the root
user). This parameter maps to
- * Privileged
in the Create a container section of the
- * Docker Remote API and the --privileged
option to docker run.
This parameter is not supported for Windows containers or tasks using the Fargate launch type.
- *The container health check command and associated configuration parameters for the
+ * container. This parameter maps to HealthCheck
in the
+ * Create a container section of the Docker Remote API and the
+ * HEALTHCHECK
parameter of docker
+ * run.
The command that is passed to the container. This parameter maps to Cmd
- * in the Create a container section of the Docker Remote API and the
- * COMMAND
parameter to docker
- * run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each
- * argument should be a separated string in the array.
A list of namespaced kernel parameters to set in the container. This parameter maps to
+ * Sysctls
in the Create a container section of the
+ * Docker Remote API and the --sysctl
option to docker run.
It is not recommended that you specify network-related systemControls
+ * parameters for multiple containers in a single task that also uses either the
+ * awsvpc
or host
network modes. For tasks that use the
+ * awsvpc
network mode, the container that is started last determines
+ * which systemControls
parameters take effect. For tasks that use the
+ * host
network mode, it changes the container instance's namespaced
+ * kernel parameters as well as the containers.
The secrets to pass to the container. For more information, see Specifying - * Sensitive Data in the Amazon Elastic Container Service Developer Guide.
+ *The type and amount of a resource to assign to a container. The only supported + * resource is a GPU.
*/ - secrets?: Secret[]; + resourceRequirements?: ResourceRequirement[]; + + /** + *The FireLens configuration for the container. This is used to specify and configure a + * log router for container logs. For more information, see Custom Log Routing + * in the Amazon Elastic Container Service Developer Guide.
+ */ + firelensConfiguration?: FirelensConfiguration; } export namespace ContainerDefinition { @@ -4763,11 +4854,19 @@ export enum ProxyConfigurationType { * instances are launched from the Amazon ECS-optimized AMI version20190301
or
* later, then they contain the required versions of the container agent and
* ecs-init
. For more information, see Amazon ECS-optimized Linux AMI
- * in the Amazon Elastic Container Service Developer Guide.
- * For tasks using the Fargate launch type, the task or service requires - * platform version 1.3.0 or later.
+ * */ export interface ProxyConfiguration { + /** + *The proxy type. The only supported value is APPMESH
.
The name of the container that will serve as the App Mesh proxy.
+ */ + containerName: string | undefined; + /** *The set of network configuration parameters to provide the Container Network Interface * (CNI) plugin, specified as key-value pairs.
@@ -4810,23 +4909,13 @@ export interface ProxyConfiguration { * *
- * EgressIgnoredIPs
- (Required) The egress traffic going to
- * the specified IP addresses is ignored and not redirected to the
- * ProxyEgressPort
. It can be an empty list.
The proxy type. The only supported value is APPMESH
.
The name of the container that will serve as the App Mesh proxy.
+ *EgressIgnoredIPs
- (Required) The egress traffic going to
+ * the specified IP addresses is ignored and not redirected to the
+ * ProxyEgressPort
. It can be an empty list.
+ *
+ *
*/
- containerName: string | undefined;
+ properties?: KeyValuePair[];
}
export namespace ProxyConfiguration {
@@ -4852,6 +4941,14 @@ export enum Scope {
* host
instead.
*/
export interface DockerVolumeConfiguration {
+ /**
+ * The scope for the Docker volume that determines its lifecycle. Docker volumes that are
+ * scoped to a task
are automatically provisioned when the task starts and
+ * destroyed when the task stops. Docker volumes that are scoped as shared
+ * persist after the task stops.
If this value is true
, the Docker volume is created if it does not
* already exist.
Custom metadata to add to your Docker volume. This parameter maps to
- * Labels
in the Create a volume section of the
- * Docker Remote API and the xxlabel
option to docker
- * volume create.
The Docker volume driver to use. The driver value must match the driver name provided * by Docker because it is used for task placement. If the driver was installed using the @@ -4891,12 +4980,12 @@ export interface DockerVolumeConfiguration { driverOpts?: { [key: string]: string }; /** - *
The scope for the Docker volume that determines its lifecycle. Docker volumes that are
- * scoped to a task
are automatically provisioned when the task starts and
- * destroyed when the task stops. Docker volumes that are scoped as shared
- * persist after the task stops.
Custom metadata to add to your Docker volume. This parameter maps to
+ * Labels
in the Create a volume section of the
+ * Docker Remote API and the xxlabel
option to docker
+ * volume create.
The authorization configuration details for the Amazon EFS file system.
*/ export interface EFSAuthorizationConfig { - /** - *Whether or not to use the Amazon ECS task IAM role defined in a task definition when
- * mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the
- * EFSVolumeConfiguration
. If this parameter is omitted, the default value
- * of DISABLED
is used. For more information, see Using
- * Amazon EFS Access Points in the Amazon Elastic Container Service Developer Guide.
The Amazon EFS access point ID to use. If an access point is specified, the root directory
* value specified in the EFSVolumeConfiguration
must either be omitted or set
@@ -4932,6 +5012,15 @@ export interface EFSAuthorizationConfig {
* EFS Access Points in the Amazon Elastic File System User Guide.
Whether or not to use the Amazon ECS task IAM role defined in a task definition when
+ * mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the
+ * EFSVolumeConfiguration
. If this parameter is omitted, the default value
+ * of DISABLED
is used. For more information, see Using
+ * Amazon EFS Access Points in the Amazon Elastic Container Service Developer Guide.
The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS - * server. If you do not specify a transit encryption port, it will use the port selection - * strategy that the Amazon EFS mount helper uses. For more information, see EFS Mount - * Helper in the Amazon Elastic File System User Guide.
+ *The Amazon EFS file system ID to use.
*/ - transitEncryptionPort?: number; + fileSystemId: string | undefined; /** - *The Amazon EFS file system ID to use.
+ *The directory within the Amazon EFS file system to mount as the root directory inside the
+ * host. If this parameter is omitted, the root of the Amazon EFS volume will be used.
+ * Specifying /
will have the same effect as omitting this parameter.
If an EFS access point is specified in the authorizationConfig
, the
+ * root directory parameter must either be omitted or set to /
which will
+ * enforce the path set on the EFS access point.
Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host @@ -4974,16 +5067,12 @@ export interface EFSVolumeConfiguration { transitEncryption?: EFSTransitEncryption | string; /** - *
The directory within the Amazon EFS file system to mount as the root directory inside the
- * host. If this parameter is omitted, the root of the Amazon EFS volume will be used.
- * Specifying /
will have the same effect as omitting this parameter.
If an EFS access point is specified in the authorizationConfig
, the
- * root directory parameter must either be omitted or set to /
which will
- * enforce the path set on the EFS access point.
The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS + * server. If you do not specify a transit encryption port, it will use the port selection + * strategy that the Amazon EFS mount helper uses. For more information, see EFS Mount + * Helper in the Amazon Elastic File System User Guide.
*/ - rootDirectory?: string; + transitEncryptionPort?: number; /** *The authorization configuration details for the Amazon EFS file system.
@@ -5005,33 +5094,17 @@ export namespace EFSVolumeConfiguration { */ export interface FSxWindowsFileServerAuthorizationConfig { /** - *A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted EC2 - * AD.
+ *The authorization credential option to use. The authorization credential options can + * be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or AWS Systems Manager + * Parameter Store parameter. The ARNs refer to the stored credentials.
*/ - domain: string | undefined; + credentialsParameter: string | undefined; /** - *The authorization credential option to use.
- *The authorization credential options can be provided using either the AWS Secrets Manager ARN or - * the AWS Systems Manager ARN. The ARNs refer to the stored credentials.
- *- * options: - *
- *- * ARN of an - * AWS Secrets Manager - * secret.
- *- * ARN of an - * AWS Systems Manager parameter.
- *A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on + * Amazon EC2.
*/ - credentialsParameter: string | undefined; + domain: string | undefined; } export namespace FSxWindowsFileServerAuthorizationConfig { @@ -5048,15 +5121,15 @@ export namespace FSxWindowsFileServerAuthorizationConfig { */ export interface FSxWindowsFileServerVolumeConfiguration { /** - *The directory within the Amazon FSx for Windows File Server file system to mount as the root directory - * inside the host.
+ *The Amazon FSx for Windows File Server file system ID to use.
*/ - rootDirectory: string | undefined; + fileSystemId: string | undefined; /** - *The Amazon FSx for Windows File Server file system ID to use.
+ *The directory within the Amazon FSx for Windows File Server file system to mount as the root directory + * inside the host.
*/ - fileSystemId: string | undefined; + rootDirectory: string | undefined; /** *The authorization configuration details for the Amazon FSx for Windows File Server file system.
@@ -5106,6 +5179,13 @@ export namespace HostVolumeProperties { * Tasks. */ export interface Volume { + /** + *The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. This name is referenced in the
+ * sourceVolume
parameter of container definition
+ * mountPoints
.
This parameter is specified when you are using bind mount host volumes. The contents
* of the host
parameter determine whether your bind mount host volume
@@ -5121,13 +5201,6 @@ export interface Volume {
*/
host?: HostVolumeProperties;
- /**
- *
The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, and hyphens are allowed. This name is referenced in the
- * sourceVolume
parameter of container definition
- * mountPoints
.
This parameter is specified when you are using Docker volumes. Docker volumes are only * supported when you are using the EC2 launch type. Windows containers only @@ -5163,53 +5236,53 @@ export namespace Volume { */ export interface TaskDefinition { /** - *
The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the - * task permission to call AWS APIs on your behalf. For more information, see Amazon ECS - * Task Role in the Amazon Elastic Container Service Developer Guide.
- *IAM roles for tasks on Windows require that the -EnableTaskIAMRole
option
- * is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some
- * configuration code in order to take advantage of the feature. For more information, see
- * Windows IAM Roles
- * for Tasks in the Amazon Elastic Container Service Developer Guide.
The full Amazon Resource Name (ARN) of the task definition.
*/ - taskRoleArn?: string; + taskDefinitionArn?: string; /** - *The revision of the task in a particular family. The revision is a version number of a
- * task definition in a family. When you register a task definition for the first time, the
- * revision is 1
. Each time that you register a new revision of a task
- * definition in the same family, the revision value always increases by one, even if you
- * have deregistered previous revisions in this family.
A list of container definitions in JSON format that describe the different containers + * that make up your task. For more information about container definition parameters and + * defaults, see Amazon ECS Task + * Definitions in the Amazon Elastic Container Service Developer Guide.
*/ - revision?: number; + containerDefinitions?: ContainerDefinition[]; /** - *The status of the task definition.
+ *The name of a family that this task definition is registered to. Up to 255 letters + * (uppercase and lowercase), numbers, hyphens, and underscores are allowed.
+ *A family groups multiple versions of a task definition. Amazon ECS gives the first task + * definition that you registered to a family a revision number of 1. Amazon ECS gives + * sequential revision numbers to each task definition that you add.
*/ - status?: TaskDefinitionStatus | string; + family?: string; /** - *The configuration details for the App Mesh proxy.
- *Your Amazon ECS container instances require at least version 1.26.0 of the container agent
- * and at least version 1.26.0-1 of the ecs-init
package to enable a proxy
- * configuration. If your container instances are launched from the Amazon ECS-optimized AMI
- * version 20190301
or later, then they contain the required versions of the
- * container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the + * task permission to call AWS APIs on your behalf. For more information, see Amazon ECS + * Task Role in the Amazon Elastic Container Service Developer Guide.
+ *IAM roles for tasks on Windows require that the -EnableTaskIAMRole
option
+ * is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some
+ * configuration code in order to take advantage of the feature. For more information, see
+ * Windows IAM Roles
+ * for Tasks in the Amazon Elastic Container Service Developer Guide.
An array of placement constraint objects to use for tasks. This field is not valid if - * you are using the Fargate launch type for your task.
+ *The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent + * permission to make AWS API calls on your behalf. The task execution IAM role is required + * depending on the requirements of your task. For more information, see Amazon ECS task + * execution IAM role in the Amazon Elastic Container Service Developer Guide.
*/ - placementConstraints?: TaskDefinitionPlacementConstraint[]; + executionRoleArn?: string; /** *The Docker networking mode to use for the containers in the task. The valid values are
* none
, bridge
, awsvpc
, and host
.
- * The default Docker network mode is bridge
. If you are using the
- * Fargate launch type, the awsvpc
network mode is required. If
- * you are using the EC2 launch type, any network mode can be used. If the network
+ * If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required.
+ * For Amazon ECS tasks on Amazon EC2 instances, any network mode can be used. If the network
* mode is set to none
, you cannot specify port mappings in your container
* definitions, and the tasks containers do not have external connectivity. The
* host
and awsvpc
network modes offer the highest networking
@@ -5220,6 +5293,11 @@ export interface TaskDefinition {
* network mode) or the attached elastic network interface port (for the
* awsvpc
network mode), so you cannot take advantage of dynamic host port
* mappings.
When using the host
network mode, you should not run
+ * containers using the root user (UID 0). It is considered best practice
+ * to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network
* interface, and you must specify a NetworkConfiguration value when you create
* a service or run a task with the task definition. For more information, see Task Networking in the
@@ -5241,20 +5319,111 @@ export interface TaskDefinition {
networkMode?: NetworkMode | string;
/**
- *
The name of a family that this task definition is registered to. Up to 255 letters - * (uppercase and lowercase), numbers, hyphens, and underscores are allowed.
- *A family groups multiple versions of a task definition. Amazon ECS gives the first task - * definition that you registered to a family a revision number of 1. Amazon ECS gives - * sequential revision numbers to each task definition that you add.
+ *The revision of the task in a particular family. The revision is a version number of a
+ * task definition in a family. When you register a task definition for the first time, the
+ * revision is 1
. Each time that you register a new revision of a task
+ * definition in the same family, the revision value always increases by one, even if you
+ * have deregistered previous revisions in this family.
The list of volume definitions for the task.
+ *If your tasks are using the Fargate launch type, the host
+ * and sourcePath
parameters are not supported.
For more information about volume definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.
+ */ + volumes?: Volume[]; + + /** + *The status of the task definition.
+ */ + status?: TaskDefinitionStatus | string; + + /** + *The container instance attributes required by your task. This field is not valid if + * you are using the Fargate launch type for your task.
+ */ + requiresAttributes?: Attribute[]; + + /** + *An array of placement constraint objects to use for tasks. This field is not valid if + * you are using the Fargate launch type for your task.
+ */ + placementConstraints?: TaskDefinitionPlacementConstraint[]; + + /** + *The launch type to use with your task. For more information, see Amazon ECS + * Launch Types in the Amazon Elastic Container Service Developer Guide.
+ */ + compatibilities?: (Compatibility | string)[]; + + /** + *The launch type the task requires. If no value is specified, it will default to
+ * EC2
. Valid values include EC2
and
+ * FARGATE
.
The number of cpu
units used by the task. If you are using the EC2 launch
+ * type, this field is optional and any value can be used. If you are using the Fargate
+ * launch type, this field is required and you must use one of the following values, which
+ * determines your range of valid values for the memory
parameter:
256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
The amount (in MiB) of memory used by the task.
+ *If using the EC2 launch type, you must specify either a task-level + * memory value or a container-level memory value. This field is optional and any value can + * be used. If a task-level memory value is specified then the container-level memory value + * is optional. For more information regarding container-level memory and memory + * reservation, see ContainerDefinition.
+ *If using the Fargate launch type, this field is required and you must
+ * use one of the following values, which determines your range of valid values for the
+ * cpu
parameter:
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
The launch type the task requires. If no value is specified, it will default to
- * EC2
. Valid values include EC2
and
- * FARGATE
.
The Elastic Inference accelerator associated with the task.
*/ - requiresCompatibilities?: (Compatibility | string)[]; + inferenceAccelerators?: InferenceAccelerator[]; /** *The process namespace to use for the containers in the task. The valid @@ -5276,30 +5445,6 @@ export interface TaskDefinition { */ pidMode?: PidMode | string; - /** - *
The full Amazon Resource Name (ARN) of the task definition.
- */ - taskDefinitionArn?: string; - - /** - *The launch type to use with your task. For more information, see Amazon ECS - * Launch Types in the Amazon Elastic Container Service Developer Guide.
- */ - compatibilities?: (Compatibility | string)[]; - - /** - *A list of container definitions in JSON format that describe the different containers - * that make up your task. For more information about container definition parameters and - * defaults, see Amazon ECS Task - * Definitions in the Amazon Elastic Container Service Developer Guide.
- */ - containerDefinitions?: ContainerDefinition[]; - - /** - *The Elastic Inference accelerator associated with the task.
- */ - inferenceAccelerators?: InferenceAccelerator[]; - /** *The IPC resource namespace to use for the containers in the task. The valid values are
* host
, task
, or none
. If host
is
@@ -5337,81 +5482,14 @@ export interface TaskDefinition {
ipcMode?: IpcMode | string;
/**
- *
The amount (in MiB) of memory used by the task.
- *If using the EC2 launch type, you must specify either a task-level - * memory value or a container-level memory value. This field is optional and any value can - * be used. If a task-level memory value is specified then the container-level memory value - * is optional. For more information regarding container-level memory and memory - * reservation, see ContainerDefinition.
- *If using the Fargate launch type, this field is required and you must
- * use one of the following values, which determines your range of valid values for the
- * cpu
parameter:
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
The container instance attributes required by your task. This field is not valid if - * you are using the Fargate launch type for your task.
- */ - requiresAttributes?: Attribute[]; - - /** - *The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent - * permission to make AWS API calls on your behalf. The task execution IAM role is required - * depending on the requirements of your task. For more information, see Amazon ECS task - * execution IAM role in the Amazon Elastic Container Service Developer Guide.
- */ - executionRoleArn?: string; - - /** - *The number of cpu
units used by the task. If you are using the EC2 launch
- * type, this field is optional and any value can be used. If you are using the Fargate
- * launch type, this field is required and you must use one of the following values, which
- * determines your range of valid values for the memory
parameter:
256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
The list of volume definitions for the task.
- *If your tasks are using the Fargate launch type, the host
- * and sourcePath
parameters are not supported.
For more information about volume definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.
+ *The configuration details for the App Mesh proxy.
+ *Your Amazon ECS container instances require at least version 1.26.0 of the container agent
+ * and at least version 1.26.0-1 of the ecs-init
package to enable a proxy
+ * configuration. If your container instances are launched from the Amazon ECS-optimized AMI
+ * version 20190301
or later, then they contain the required versions of the
+ * container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to
+ * 100
capacity providers can be described in an action.
Specifies whether or not you want to see the resource tags for the capacity provider.
+ * If TAGS
is specified, the tags are included in the response. If this field
+ * is omitted, tags are not included in the response.
The maximum number of account setting results returned by
* DescribeCapacityProviders
in paginated output. When this parameter is
@@ -5453,12 +5544,6 @@ export interface DescribeCapacityProvidersRequest {
*/
maxResults?: number;
- /**
- *
The short name or full Amazon Resource Name (ARN) of one or more capacity providers. Up to
- * 100
capacity providers can be described in an action.
The nextToken
value returned from a previous paginated
* DescribeCapacityProviders
request where maxResults
was
@@ -5470,13 +5555,6 @@ export interface DescribeCapacityProvidersRequest {
*
*/
nextToken?: string;
-
- /**
- *
Specifies whether or not you want to see the resource tags for the capacity provider.
- * If TAGS
is specified, the tags are included in the response. If this field
- * is omitted, tags are not included in the response.
The Amazon Resource Name (ARN) of the failed resource.
+ */ + arn?: string; + /** *The reason for the failure.
*/ @@ -5499,11 +5582,6 @@ export interface Failure { *The details of the failure.
*/ detail?: string; - - /** - *The Amazon Resource Name (ARN) of the failed resource.
- */ - arn?: string; } export namespace Failure { @@ -5518,6 +5596,11 @@ export interface DescribeCapacityProvidersResponse { */ capacityProviders?: CapacityProvider[]; + /** + *Any failures associated with the call.
+ */ + failures?: Failure[]; + /** *The nextToken
value to include in a future
* DescribeCapacityProviders
request. When the results of a
@@ -5526,11 +5609,6 @@ export interface DescribeCapacityProvidersResponse {
* when there are no more results to return.
Any failures associated with the call.
- */ - failures?: Failure[]; } export namespace DescribeCapacityProvidersResponse { @@ -5602,14 +5680,14 @@ export namespace DescribeClustersRequest { export interface DescribeClustersResponse { /** - *Any failures associated with the call.
+ *The list of clusters.
*/ - failures?: Failure[]; + clusters?: Cluster[]; /** - *The list of clusters.
+ *Any failures associated with the call.
*/ - clusters?: Cluster[]; + failures?: Failure[]; } export namespace DescribeClustersResponse { @@ -5623,13 +5701,6 @@ export enum ContainerInstanceField { } export interface DescribeContainerInstancesRequest { - /** - *Specifies whether you want to see the resource tags for the container instance. If
- * TAGS
is specified, the tags are included in the response. If this field
- * is omitted, tags are not included in the response.
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to * describe. If you do not specify a cluster, the default cluster is assumed. This parameter is required if the container instance @@ -5642,6 +5713,13 @@ export interface DescribeContainerInstancesRequest { *
A list of up to 100 container instance IDs or full Amazon Resource Name (ARN) entries.
*/ containerInstances: string[] | undefined; + + /** + *Specifies whether you want to see the resource tags for the container instance. If
+ * TAGS
is specified, the tags are included in the response. If this field
+ * is omitted, tags are not included in the response.
The short name or full Amazon Resource Name (ARN)the cluster that hosts the service to describe. + * If you do not specify a cluster, the default cluster is assumed. This parameter is required if the service or services you are + * describing were launched in any cluster other than the default cluster.
+ */ + cluster?: string; + /** *A list of services to describe. You may specify up to 10 services to describe in a * single operation.
@@ -5685,13 +5770,6 @@ export interface DescribeServicesRequest { * is omitted, tags are not included in the response. */ include?: (ServiceField | string)[]; - - /** - *The short name or full Amazon Resource Name (ARN)the cluster that hosts the service to describe. - * If you do not specify a cluster, the default cluster is assumed. This parameter is required if the service or services you are - * describing were launched in any cluster other than the default cluster.
- */ - cluster?: string; } export namespace DescribeServicesRequest { @@ -5723,13 +5801,6 @@ export enum TaskDefinitionField { } export interface DescribeTaskDefinitionRequest { - /** - *Specifies whether to see the resource tags for the task definition. If
- * TAGS
is specified, the tags are included in the response. If this field
- * is omitted, tags are not included in the response.
The family
for the latest ACTIVE
revision,
* family
and revision
(family:revision
) for a
@@ -5737,6 +5808,13 @@ export interface DescribeTaskDefinitionRequest {
* describe.
Specifies whether to see the resource tags for the task definition. If
+ * TAGS
is specified, the tags are included in the response. If this field
+ * is omitted, tags are not included in the response.
The port number on the host that is used with the network binding.
+ *The IP address that the container is bound to on the container instance.
*/ - hostPort?: number; + bindIP?: string; /** *The port number on the container that is used with the network binding.
@@ -5856,9 +5934,9 @@ export interface NetworkBinding { containerPort?: number; /** - *The IP address that the container is bound to on the container instance.
+ *The port number on the host that is used with the network binding.
*/ - bindIP?: string; + hostPort?: number; /** *The protocol used for the network binding.
@@ -5877,11 +5955,6 @@ export namespace NetworkBinding { *awsvpc
network mode.
*/
export interface NetworkInterface {
- /**
- * The private IPv6 address for the network interface.
- */ - ipv6Address?: string; - /** *The attachment ID for the network interface.
*/ @@ -5891,6 +5964,11 @@ export interface NetworkInterface { *The private IPv4 address for the network interface.
*/ privateIpv4Address?: string; + + /** + *The private IPv6 address for the network interface.
+ */ + ipv6Address?: string; } export namespace NetworkInterface { @@ -5904,38 +5982,25 @@ export namespace NetworkInterface { */ export interface Container { /** - *The network bindings associated with the container.
- */ - networkBindings?: NetworkBinding[]; - - /** - *The soft limit (in MiB) of memory set for the container.
+ *The Amazon Resource Name (ARN) of the container.
*/ - memoryReservation?: string; + containerArn?: string; /** - *The number of CPU units set for the container. The value will be 0
if no
- * value was specified in the container definition when the task definition was
- * registered.
The ARN of the task.
*/ - cpu?: string; + taskArn?: string; /** - *The hard limit (in MiB) of memory set for the container.
+ *The name of the container.
*/ - memory?: string; + name?: string; /** *The image used for the container.
*/ image?: string; - /** - *A short (255 max characters) human-readable string to provide additional details about - * a running or stopped container.
- */ - reason?: string; - /** *The container image manifest digest.
*The IDs of each GPU assigned to the container.
+ *The ID of the Docker container.
*/ - gpuIds?: string[]; + runtimeId?: string; /** - *The ID of the Docker container.
+ *The last known status of the container.
*/ - runtimeId?: string; + lastStatus?: string; /** - *The Amazon Resource Name (ARN) of the container.
+ *The exit code returned from the container.
*/ - containerArn?: string; + exitCode?: number; /** - *The health status of the container. If health checks are not configured for this
- * container in its task definition, then it reports the health status as
- * UNKNOWN
.
A short (255 max characters) human-readable string to provide additional details about + * a running or stopped container.
*/ - healthStatus?: HealthStatus | string; + reason?: string; /** - *The ARN of the task.
+ *The network bindings associated with the container.
*/ - taskArn?: string; + networkBindings?: NetworkBinding[]; /** *The network interfaces associated with the container.
@@ -5978,19 +6042,33 @@ export interface Container { networkInterfaces?: NetworkInterface[]; /** - *The name of the container.
+ *The health status of the container. If health checks are not configured for this
+ * container in its task definition, then it reports the health status as
+ * UNKNOWN
.
The last known status of the container.
+ *The number of CPU units set for the container. The value will be 0
if no
+ * value was specified in the container definition when the task definition was
+ * registered.
The exit code returned from the container.
+ *The hard limit (in MiB) of memory set for the container.
*/ - exitCode?: number; + memory?: string; + + /** + *The soft limit (in MiB) of memory set for the container.
+ */ + memoryReservation?: string; + + /** + *The IDs of each GPU assigned to the container.
+ */ + gpuIds?: string[]; } export namespace Container { @@ -6007,10 +6085,10 @@ export namespace Container { */ export interface ContainerOverride { /** - *The number of cpu
units reserved for the container, instead of the
- * default value from the task definition. You must also specify a container name.
The name of the container that receives the override. This parameter is required if + * any override is specified.
*/ - cpu?: number; + name?: string; /** *The command to send to the container that overrides the default command from the @@ -6019,22 +6097,12 @@ export interface ContainerOverride { command?: string[]; /** - *
The soft limit (in MiB) of memory to reserve for the container, instead of the default - * value from the task definition. You must also specify a container name.
- */ - memoryReservation?: number; - - /** - *The type and amount of a resource to assign to a container, instead of the default - * value from the task definition. The only supported resource is a GPU.
- */ - resourceRequirements?: ResourceRequirement[]; - - /** - *The name of the container that receives the override. This parameter is required if - * any override is specified.
+ *The environment variables to send to the container. You can add new environment + * variables, which are added to the container at launch, or you can override the existing + * environment variables from the Docker image or the task definition. You must also + * specify a container name.
*/ - name?: string; + environment?: KeyValuePair[]; /** *A list of files containing the environment variables to pass to a container, instead @@ -6042,6 +6110,12 @@ export interface ContainerOverride { */ environmentFiles?: EnvironmentFile[]; + /** + *
The number of cpu
units reserved for the container, instead of the
+ * default value from the task definition. You must also specify a container name.
The hard limit (in MiB) of memory to present to the container, instead of the default * value from the task definition. If your container attempts to exceed the memory @@ -6050,12 +6124,16 @@ export interface ContainerOverride { memory?: number; /** - *
The environment variables to send to the container. You can add new environment - * variables, which are added to the container at launch, or you can override the existing - * environment variables from the Docker image or the task definition. You must also - * specify a container name.
+ *The soft limit (in MiB) of memory to reserve for the container, instead of the default + * value from the task definition. You must also specify a container name.
*/ - environment?: KeyValuePair[]; + memoryReservation?: number; + + /** + *The type and amount of a resource to assign to a container, instead of the default + * value from the task definition. The only supported resource is a GPU.
+ */ + resourceRequirements?: ResourceRequirement[]; } export namespace ContainerOverride { @@ -6099,9 +6177,14 @@ export interface TaskOverride { containerOverrides?: ContainerOverride[]; /** - *The memory override for the task.
+ *The cpu override for the task.
*/ - memory?: string; + cpu?: string; + + /** + *The Elastic Inference accelerator override for the task.
+ */ + inferenceAcceleratorOverrides?: InferenceAcceleratorOverride[]; /** *The Amazon Resource Name (ARN) of the task execution IAM role override for the task.
@@ -6109,20 +6192,15 @@ export interface TaskOverride { executionRoleArn?: string; /** - *The cpu override for the task.
+ *The memory override for the task.
*/ - cpu?: string; + memory?: string; /** *The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers * in this task are granted the permissions that are specified in this role.
*/ taskRoleArn?: string; - - /** - *The Elastic Inference accelerator override for the task.
- */ - inferenceAcceleratorOverrides?: InferenceAcceleratorOverride[]; } export namespace TaskOverride { @@ -6142,23 +6220,20 @@ export enum TaskStopCode { */ export interface Task { /** - *The Amazon Resource Name (ARN) of the task.
+ *The Elastic Network Adapter associated with the task if the task uses the
+ * awsvpc
network mode.
The platform version on which your task is running. A platform version is only
- * specified for tasks using the Fargate launch type. If one is not
- * specified, the LATEST
platform version is used by default. For more
- * information, see AWS Fargate Platform
- * Versions in the Amazon Elastic Container Service Developer Guide.
The attributes of the task
*/ - platformVersion?: string; + attributes?: Attribute[]; /** - *The ARN of the cluster that hosts the task.
+ *The availability zone of the task.
*/ - clusterArn?: string; + availabilityZone?: string; /** *The capacity provider associated with the task.
@@ -6166,20 +6241,30 @@ export interface Task { capacityProviderName?: string; /** - *The Unix timestamp for when the task execution stopped.
+ *The ARN of the cluster that hosts the task.
*/ - executionStoppedAt?: Date; + clusterArn?: string; /** - *The availability zone of the task.
+ *The connectivity status of a task.
*/ - availabilityZone?: string; + connectivity?: Connectivity | string; /** - *The Unix timestamp for when the task started (the task transitioned from the
- * PENDING
state to the RUNNING
state).
The Unix timestamp for when the task last went into CONNECTED
+ * status.
The ARN of the container instances that host the task.
+ */ + containerInstanceArn?: string; + + /** + *The containers associated with the task.
+ */ + containers?: Container[]; /** *The number of CPU units used by the task as expressed in a task definition. It can be @@ -6211,51 +6296,123 @@ export interface Task { * * */ - cpu?: string; - - /** - *
The Unix timestamp for when the container image pull began.
- */ - pullStartedAt?: Date; - - /** - *The Unix timestamp for when the task was created (the task entered the
- * PENDING
state).
The Elastic Inference accelerator associated with the task.
- */ - inferenceAccelerators?: InferenceAccelerator[]; + cpu?: string; + + /** + *The Unix timestamp for when the task was created (the task entered the
+ * PENDING
state).
The desired status of the task. For more information, see Task + * Lifecycle.
+ */ + desiredStatus?: string; + + /** + *The Unix timestamp for when the task execution stopped.
+ */ + executionStoppedAt?: Date; + + /** + *The name of the task group associated with the task.
+ */ + group?: string; + + /** + *The health status for the task, which is determined by the health of the essential
+ * containers in the task. If all essential containers in the task are reporting as
+ * HEALTHY
, then the task status also reports as HEALTHY
. If
+ * any essential containers in the task are reporting as UNHEALTHY
or
+ * UNKNOWN
, then the task status also reports as UNHEALTHY
or
+ * UNKNOWN
, accordingly.
The Amazon ECS container agent does not monitor or report on Docker health checks that + * are embedded in a container image (such as those specified in a parent image or from + * the image's Dockerfile) and not specified in the container definition. Health check + * parameters that are specified in a container definition override any Docker health + * checks that exist in the container image.
+ *The Elastic Inference accelerator associated with the task.
+ */ + inferenceAccelerators?: InferenceAccelerator[]; + + /** + *The last known status of the task. For more information, see Task + * Lifecycle.
+ */ + lastStatus?: string; + + /** + *The launch type on which your task is running. For more information, see Amazon ECS + * Launch Types in the Amazon Elastic Container Service Developer Guide.
+ */ + launchType?: LaunchType | string; + + /** + *The amount of memory (in MiB) used by the task as expressed in a task definition. It
+ * can be expressed as an integer using MiB, for example 1024
. It can also be
+ * expressed as a string using GB, for example 1GB
or 1 GB
.
+ * String values are converted to an integer indicating the MiB when the task definition is
+ * registered.
If you are using the EC2 launch type, this field is optional.
+ *If you are using the Fargate launch type, this field is required and you
+ * must use one of the following values, which determines your range of supported values
+ * for the cpu
parameter:
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
The ARN of the container instances that host the task.
+ *One or more container overrides.
*/ - containerInstanceArn?: string; + overrides?: TaskOverride; /** - *The launch type on which your task is running. For more information, see Amazon ECS - * Launch Types in the Amazon Elastic Container Service Developer Guide.
+ *The platform version on which your task is running. A platform version is only
+ * specified for tasks using the Fargate launch type. If one is not
+ * specified, the LATEST
platform version is used by default. For more
+ * information, see AWS Fargate Platform
+ * Versions in the Amazon Elastic Container Service Developer Guide.
The Elastic Network Adapter associated with the task if the task uses the
- * awsvpc
network mode.
The Unix timestamp for when the container image pull began.
*/ - attachments?: Attachment[]; + pullStartedAt?: Date; /** - *The name of the task group associated with the task.
+ *The Unix timestamp for when the container image pull completed.
*/ - group?: string; + pullStoppedAt?: Date; /** - *The desired status of the task. For more information, see Task - * Lifecycle.
+ *The Unix timestamp for when the task started (the task transitioned from the
+ * PENDING
state to the RUNNING
state).
The tag specified when a task is started. If the task is started by an Amazon ECS service, @@ -6265,47 +6422,27 @@ export interface Task { startedBy?: string; /** - *
The Unix timestamp for when the container image pull completed.
- */ - pullStoppedAt?: Date; - - /** - *The connectivity status of a task.
+ *The stop code indicating why a task was stopped. The stoppedReason
may
+ * contain additional details.
The Unix timestamp for when the task last went into CONNECTED
- * status.
The Unix timestamp for when the task was stopped (the task transitioned from the
+ * RUNNING
state to the STOPPED
state).
The health status for the task, which is determined by the health of the essential
- * containers in the task. If all essential containers in the task are reporting as
- * HEALTHY
, then the task status also reports as HEALTHY
. If
- * any essential containers in the task are reporting as UNHEALTHY
or
- * UNKNOWN
, then the task status also reports as UNHEALTHY
or
- * UNKNOWN
, accordingly.
The Amazon ECS container agent does not monitor or report on Docker health checks that - * are embedded in a container image (such as those specified in a parent image or from - * the image's Dockerfile) and not specified in the container definition. Health check - * parameters that are specified in a container definition override any Docker health - * checks that exist in the container image.
- *The reason that the task was stopped.
*/ - healthStatus?: HealthStatus | string; + stoppedReason?: string; /** - *The version counter for the task. Every time a task experiences a change that triggers
- * a CloudWatch event, the version counter is incremented. If you are replicating your Amazon ECS task
- * state with CloudWatch Events, you can compare the version of a task reported by the Amazon ECS API
- * actions with the version reported in CloudWatch Events for the task (inside the
- * detail
object) to verify that the version in your event stream is
- * current.
The Unix timestamp for when the task stops (transitions from the RUNNING
+ * state to STOPPED
).
The metadata that you apply to the task to help you categorize and organize them. Each @@ -6345,30 +6482,9 @@ export interface Task { tags?: Tag[]; /** - *
One or more container overrides.
- */ - overrides?: TaskOverride; - - /** - *The Unix timestamp for when the task was stopped (the task transitioned from the
- * RUNNING
state to the STOPPED
state).
The attributes of the task
- */ - attributes?: Attribute[]; - - /** - *The reason that the task was stopped.
- */ - stoppedReason?: string; - - /** - *The containers associated with the task.
+ *The Amazon Resource Name (ARN) of the task.
*/ - containers?: Container[]; + taskArn?: string; /** *The ARN of the task definition that creates the task.
@@ -6376,52 +6492,14 @@ export interface Task { taskDefinitionArn?: string; /** - *The amount of memory (in MiB) used by the task as expressed in a task definition. It
- * can be expressed as an integer using MiB, for example 1024
. It can also be
- * expressed as a string using GB, for example 1GB
or 1 GB
.
- * String values are converted to an integer indicating the MiB when the task definition is
- * registered.
If you are using the EC2 launch type, this field is optional.
- *If you are using the Fargate launch type, this field is required and you
- * must use one of the following values, which determines your range of supported values
- * for the cpu
parameter:
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
The last known status of the task. For more information, see Task - * Lifecycle.
- */ - lastStatus?: string; - - /** - *The Unix timestamp for when the task stops (transitions from the RUNNING
- * state to STOPPED
).
The stop code indicating why a task was stopped. The stoppedReason
may
- * contain additional details.
The version counter for the task. Every time a task experiences a change that triggers
+ * a CloudWatch event, the version counter is incremented. If you are replicating your Amazon ECS task
+ * state with CloudWatch Events, you can compare the version of a task reported by the Amazon ECS API
+ * actions with the version reported in CloudWatch Events for the task (inside the
+ * detail
object) to verify that the version in your event stream is
+ * current.
The ID or full Amazon Resource Name (ARN) of task sets to - * describe.
- */ - taskSets?: string[]; - /** *The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task * sets exist in.
@@ -6470,6 +6542,12 @@ export interface DescribeTaskSetsRequest { */ service: string | undefined; + /** + *The ID or full Amazon Resource Name (ARN) of task sets to + * describe.
+ */ + taskSets?: string[]; + /** *Specifies whether to see the resource tags for the task set. If TAGS
is
* specified, the tags are included in the response. If this field is omitted, tags are not
@@ -6503,17 +6581,17 @@ export namespace DescribeTaskSetsResponse {
}
export interface DiscoverPollEndpointRequest {
- /**
- *
The short name or full Amazon Resource Name (ARN) of the cluster to which the container instance - * belongs.
- */ - cluster?: string; - /** *The container instance ID or full ARN of the container instance.
* The ARN contains the arn:aws:ecs
namespace, followed by the Region of the container instance, the AWS account ID of the container instance owner, the container-instance
namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID
.
The short name or full Amazon Resource Name (ARN) of the cluster to which the container instance + * belongs.
+ */ + cluster?: string; } export namespace DiscoverPollEndpointRequest { @@ -6542,16 +6620,9 @@ export namespace DiscoverPollEndpointResponse { export interface ListAccountSettingsRequest { /** - *The nextToken
value returned from a ListAccountSettings
- * request indicating that more results are available to fulfill the request and further
- * calls will be needed. If maxResults
was provided, it is possible the number
- * of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to - * retrieve the next items in a list and not for other programmatic purposes.
- *The name of the account setting you want to list the settings for.
*/ - nextToken?: string; + name?: SettingName | string; /** *The value of the account settings with which to filter results. You must also specify @@ -6560,9 +6631,11 @@ export interface ListAccountSettingsRequest { value?: string; /** - *
The name of the account setting you want to list the settings for.
+ *The ARN of the principal, which can be an IAM user, IAM role, or the root user. If + * this field is omitted, the account settings are listed only for the authenticated + * user.
*/ - name?: SettingName | string; + principalArn?: string; /** *Specifies whether to return the effective settings. If true
, the account
@@ -6572,6 +6645,18 @@ export interface ListAccountSettingsRequest {
*/
effectiveSettings?: boolean;
+ /**
+ *
The nextToken
value returned from a ListAccountSettings
+ * request indicating that more results are available to fulfill the request and further
+ * calls will be needed. If maxResults
was provided, it is possible the number
+ * of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to + * retrieve the next items in a list and not for other programmatic purposes.
+ *The maximum number of account setting results returned by
* ListAccountSettings
in paginated output. When this parameter is used,
@@ -6585,13 +6670,6 @@ export interface ListAccountSettingsRequest {
* if applicable.
The ARN of the principal, which can be an IAM user, IAM role, or the root user. If - * this field is omitted, the account settings are listed only for the authenticated - * user.
- */ - principalArn?: string; } export namespace ListAccountSettingsRequest { @@ -6624,33 +6702,20 @@ export namespace ListAccountSettingsResponse { export interface ListAttributesRequest { /** - *The type of the target with which to list attributes.
+ *The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. + * If you do not specify a cluster, the default cluster is assumed.
*/ - targetType: TargetType | string | undefined; + cluster?: string; /** - *The maximum number of cluster results returned by ListAttributes
in
- * paginated output. When this parameter is used, ListAttributes
only returns
- * maxResults
results in a single page along with a nextToken
- * response element. The remaining results of the initial request can be seen by sending
- * another ListAttributes
request with the returned nextToken
- * value. This value can be between 1 and 100. If this
- * parameter is not used, then ListAttributes
returns up to
- * 100 results and a nextToken
value if applicable.
The type of the target with which to list attributes.
*/ - maxResults?: number; + targetType: TargetType | string | undefined; /** - *The nextToken
value returned from a ListAttributes
request
- * indicating that more results are available to fulfill the request and further calls will
- * be needed. If maxResults
was provided, it is possible the number of results
- * to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to - * retrieve the next items in a list and not for other programmatic purposes.
- *The name of the attribute with which to filter the results.
*/ - nextToken?: string; + attributeName?: string; /** *The value of the attribute with which to filter results. You must also specify an @@ -6658,16 +6723,29 @@ export interface ListAttributesRequest { */ attributeValue?: string; - /** - *
The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. - * If you do not specify a cluster, the default cluster is assumed.
+ /** + *The nextToken
value returned from a ListAttributes
request
+ * indicating that more results are available to fulfill the request and further calls will
+ * be needed. If maxResults
was provided, it is possible the number of results
+ * to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to + * retrieve the next items in a list and not for other programmatic purposes.
+ *The name of the attribute with which to filter the results.
+ *The maximum number of cluster results returned by ListAttributes
in
+ * paginated output. When this parameter is used, ListAttributes
only returns
+ * maxResults
results in a single page along with a nextToken
+ * response element. The remaining results of the initial request can be seen by sending
+ * another ListAttributes
request with the returned nextToken
+ * value. This value can be between 1 and 100. If this
+ * parameter is not used, then ListAttributes
returns up to
+ * 100 results and a nextToken
value if applicable.
The list of full Amazon Resource Name (ARN) entries for each cluster associated with your + * account.
+ */ + clusterArns?: string[]; + /** *The nextToken
value to include in a future ListClusters
* request. When the results of a ListClusters
request exceed
@@ -6739,12 +6823,6 @@ export interface ListClustersResponse {
* return.
The list of full Amazon Resource Name (ARN) entries for each cluster associated with your - * account.
- */ - clusterArns?: string[]; } export namespace ListClustersResponse { @@ -6762,6 +6840,12 @@ export enum ContainerInstanceStatus { } export interface ListContainerInstancesRequest { + /** + *The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to + * list. If you do not specify a cluster, the default cluster is assumed.
+ */ + cluster?: string; + /** *You can filter the results of a ListContainerInstances
operation with
* cluster query language statements. For more information, see Cluster Query Language in the
@@ -6769,12 +6853,6 @@ export interface ListContainerInstancesRequest {
*/
filter?: string;
- /**
- *
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to - * list. If you do not specify a cluster, the default cluster is assumed.
- */ - cluster?: string; - /** *The nextToken
value returned from a ListContainerInstances
* request indicating that more results are available to fulfill the request and further
@@ -6817,6 +6895,12 @@ export namespace ListContainerInstancesRequest {
}
export interface ListContainerInstancesResponse {
+ /**
+ *
The list of container instances with full ARN entries for each container instance + * associated with the specified cluster.
+ */ + containerInstanceArns?: string[]; + /** *The nextToken
value to include in a future
* ListContainerInstances
request. When the results of a
@@ -6825,12 +6909,6 @@ export interface ListContainerInstancesResponse {
* when there are no more results to return.
The list of container instances with full ARN entries for each container instance - * associated with the specified cluster.
- */ - containerInstanceArns?: string[]; } export namespace ListContainerInstancesResponse { @@ -6840,6 +6918,12 @@ export namespace ListContainerInstancesResponse { } export interface ListServicesRequest { + /** + *The short name or full Amazon Resource Name (ARN) of the cluster that hosts the services to list. + * If you do not specify a cluster, the default cluster is assumed.
+ */ + cluster?: string; + /** *The nextToken
value returned from a ListServices
request
* indicating that more results are available to fulfill the request and further calls will
@@ -6852,16 +6936,6 @@ export interface ListServicesRequest {
*/
nextToken?: string;
- /**
- *
The scheduling strategy for services to list.
- */ - schedulingStrategy?: SchedulingStrategy | string; - - /** - *The launch type for the services to list.
- */ - launchType?: LaunchType | string; - /** *The maximum number of service results returned by ListServices
in
* paginated output. When this parameter is used, ListServices
only returns
@@ -6876,10 +6950,14 @@ export interface ListServicesRequest {
maxResults?: number;
/**
- *
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the services to list. - * If you do not specify a cluster, the default cluster is assumed.
+ *The launch type for the services to list.
*/ - cluster?: string; + launchType?: LaunchType | string; + + /** + *The scheduling strategy for services to list.
+ */ + schedulingStrategy?: SchedulingStrategy | string; } export namespace ListServicesRequest { @@ -6947,18 +7025,12 @@ export enum TaskDefinitionFamilyStatus { export interface ListTaskDefinitionFamiliesRequest { /** - *The maximum number of task definition family results returned by
- * ListTaskDefinitionFamilies
in paginated output. When this parameter is
- * used, ListTaskDefinitions
only returns maxResults
results in a
- * single page along with a nextToken
response element. The remaining results
- * of the initial request can be seen by sending another
- * ListTaskDefinitionFamilies
request with the returned
- * nextToken
value. This value can be between 1 and
- * 100. If this parameter is not used, then
- * ListTaskDefinitionFamilies
returns up to 100 results
- * and a nextToken
value if applicable.
The familyPrefix
is a string that is used to filter the results of
+ * ListTaskDefinitionFamilies
. If you specify a familyPrefix
,
+ * only task definition family names that begin with the familyPrefix
string
+ * are returned.
The task definition family status with which to filter the @@ -6973,14 +7045,6 @@ export interface ListTaskDefinitionFamiliesRequest { */ status?: TaskDefinitionFamilyStatus | string; - /** - *
The familyPrefix
is a string that is used to filter the results of
- * ListTaskDefinitionFamilies
. If you specify a familyPrefix
,
- * only task definition family names that begin with the familyPrefix
string
- * are returned.
The nextToken
value returned from a
* ListTaskDefinitionFamilies
request indicating that more results are
@@ -6993,6 +7057,20 @@ export interface ListTaskDefinitionFamiliesRequest {
*
The maximum number of task definition family results returned by
+ * ListTaskDefinitionFamilies
in paginated output. When this parameter is
+ * used, ListTaskDefinitions
only returns maxResults
results in a
+ * single page along with a nextToken
response element. The remaining results
+ * of the initial request can be seen by sending another
+ * ListTaskDefinitionFamilies
request with the returned
+ * nextToken
value. This value can be between 1 and
+ * 100. If this parameter is not used, then
+ * ListTaskDefinitionFamilies
returns up to 100 results
+ * and a nextToken
value if applicable.
The list of task definition family names that match the
+ * ListTaskDefinitionFamilies
request.
The nextToken
value to include in a future
* ListTaskDefinitionFamilies
request. When the results of a
@@ -7010,12 +7094,6 @@ export interface ListTaskDefinitionFamiliesResponse {
* when there are no more results to return.
The list of task definition family names that match the
- * ListTaskDefinitionFamilies
request.
The maximum number of task definition results returned by
- * ListTaskDefinitions
in paginated output. When this parameter is used,
- * ListTaskDefinitions
only returns maxResults
results in a
- * single page along with a nextToken
response element. The remaining results
- * of the initial request can be seen by sending another ListTaskDefinitions
- * request with the returned nextToken
value. This value can be between
- * 1 and 100. If this parameter is not used, then
- * ListTaskDefinitions
returns up to 100 results and a
- * nextToken
value if applicable.
The full family name with which to filter the ListTaskDefinitions
* results. Specifying a familyPrefix
limits the listed task definitions to
@@ -7051,16 +7116,14 @@ export interface ListTaskDefinitionsRequest {
familyPrefix?: string;
/**
- *
The nextToken
value returned from a ListTaskDefinitions
- * request indicating that more results are available to fulfill the request and further
- * calls will be needed. If maxResults
was provided, it is possible the number
- * of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to - * retrieve the next items in a list and not for other programmatic purposes.
- *The task definition status with which to filter the ListTaskDefinitions
+ * results. By default, only ACTIVE
task definitions are listed. By setting
+ * this parameter to INACTIVE
, you can view task definitions that are
+ * INACTIVE
as long as an active task or service still references them. If
+ * you paginate the resulting output, be sure to keep the status
value
+ * constant in each subsequent request.
The order in which to sort the results. Valid values are ASC
and
@@ -7073,14 +7136,29 @@ export interface ListTaskDefinitionsRequest {
sort?: SortOrder | string;
/**
- *
The task definition status with which to filter the ListTaskDefinitions
- * results. By default, only ACTIVE
task definitions are listed. By setting
- * this parameter to INACTIVE
, you can view task definitions that are
- * INACTIVE
as long as an active task or service still references them. If
- * you paginate the resulting output, be sure to keep the status
value
- * constant in each subsequent request.
The nextToken
value returned from a ListTaskDefinitions
+ * request indicating that more results are available to fulfill the request and further
+ * calls will be needed. If maxResults
was provided, it is possible the number
+ * of results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to + * retrieve the next items in a list and not for other programmatic purposes.
+ *The maximum number of task definition results returned by
+ * ListTaskDefinitions
in paginated output. When this parameter is used,
+ * ListTaskDefinitions
only returns maxResults
results in a
+ * single page along with a nextToken
response element. The remaining results
+ * of the initial request can be seen by sending another ListTaskDefinitions
+ * request with the returned nextToken
value. This value can be between
+ * 1 and 100. If this parameter is not used, then
+ * ListTaskDefinitions
returns up to 100 results and a
+ * nextToken
value if applicable.
The launch type for services to list.
- */ - launchType?: LaunchType | string; - - /** - *The name of the service with which to filter the ListTasks
results.
- * Specifying a serviceName
limits the results to tasks that belong to that
- * service.
The container instance ID or full ARN of the container instance with which to filter
+ * the ListTasks
results. Specifying a containerInstance
limits
+ * the results to tasks that belong to that container instance.
The name of the family with which to filter the ListTasks
results.
@@ -7145,20 +7218,28 @@ export interface ListTasksRequest {
family?: string;
/**
- *
The task desired status with which to filter the ListTasks
results.
- * Specifying a desiredStatus
of STOPPED
limits the results to
- * tasks that Amazon ECS has set the desired status to STOPPED
. This can be useful
- * for debugging tasks that are not starting properly or have died or finished. The default
- * status filter is RUNNING
, which shows tasks that Amazon ECS has set the desired
- * status to RUNNING
.
Although you can filter results based on a desired status of PENDING
,
- * this does not return any results. Amazon ECS never sets the desired status of a task to
- * that value (only a task's lastStatus
may have a value of
- * PENDING
).
The nextToken
value returned from a ListTasks
request
+ * indicating that more results are available to fulfill the request and further calls will
+ * be needed. If maxResults
was provided, it is possible the number of results
+ * to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to + * retrieve the next items in a list and not for other programmatic purposes.
+ *The maximum number of task results returned by ListTasks
in paginated
+ * output. When this parameter is used, ListTasks
only returns
+ * maxResults
results in a single page along with a nextToken
+ * response element. The remaining results of the initial request can be seen by sending
+ * another ListTasks
request with the returned nextToken
value.
+ * This value can be between 1 and 100. If this parameter is
+ * not used, then ListTasks
returns up to 100 results and a
+ * nextToken
value if applicable.
The startedBy
value with which to filter the task results. Specifying a
@@ -7168,35 +7249,32 @@ export interface ListTasksRequest {
startedBy?: string;
/**
- *
The container instance ID or full ARN of the container instance with which to filter
- * the ListTasks
results. Specifying a containerInstance
limits
- * the results to tasks that belong to that container instance.
The name of the service with which to filter the ListTasks
results.
+ * Specifying a serviceName
limits the results to tasks that belong to that
+ * service.
The nextToken
value returned from a ListTasks
request
- * indicating that more results are available to fulfill the request and further calls will
- * be needed. If maxResults
was provided, it is possible the number of results
- * to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to - * retrieve the next items in a list and not for other programmatic purposes.
- *The task desired status with which to filter the ListTasks
results.
+ * Specifying a desiredStatus
of STOPPED
limits the results to
+ * tasks that Amazon ECS has set the desired status to STOPPED
. This can be useful
+ * for debugging tasks that are not starting properly or have died or finished. The default
+ * status filter is RUNNING
, which shows tasks that Amazon ECS has set the desired
+ * status to RUNNING
.
Although you can filter results based on a desired status of PENDING
,
+ * this does not return any results. Amazon ECS never sets the desired status of a task to
+ * that value (only a task's lastStatus
may have a value of
+ * PENDING
).
The maximum number of task results returned by ListTasks
in paginated
- * output. When this parameter is used, ListTasks
only returns
- * maxResults
results in a single page along with a nextToken
- * response element. The remaining results of the initial request can be seen by sending
- * another ListTasks
request with the returned nextToken
value.
- * This value can be between 1 and 100. If this parameter is
- * not used, then ListTasks
returns up to 100 results and a
- * nextToken
value if applicable.
The launch type for services to list.
*/ - maxResults?: number; + launchType?: LaunchType | string; } export namespace ListTasksRequest { @@ -7206,6 +7284,11 @@ export namespace ListTasksRequest { } export interface ListTasksResponse { + /** + *The list of task ARN entries for the ListTasks
request.
The nextToken
value to include in a future ListTasks
* request. When the results of a ListTasks
request exceed
@@ -7214,11 +7297,6 @@ export interface ListTasksResponse {
* return.
The list of task ARN entries for the ListTasks
request.
The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you - * specify the root user, it modifies the account setting for all IAM users, IAM roles, and - * the root user of the account unless an IAM user or role explicitly overrides these - * settings. If this field is omitted, the setting is changed only for the authenticated - * user.
- */ - principalArn?: string; - - /** - *The account setting value for the specified principal ARN. Accepted values are
- * enabled
and disabled
.
The Amazon ECS resource name for which to modify the account setting. If
* serviceLongArnFormat
is specified, the ARN for your Amazon ECS services is
@@ -7255,6 +7318,21 @@ export interface PutAccountSettingRequest {
* affected.
The account setting value for the specified principal ARN. Accepted values are
+ * enabled
and disabled
.
The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you + * specify the root user, it modifies the account setting for all IAM users, IAM roles, and + * the root user of the account unless an IAM user or role explicitly overrides these + * settings. If this field is omitted, the setting is changed only for the authenticated + * user.
+ */ + principalArn?: string; } export namespace PutAccountSettingRequest { @@ -7277,12 +7355,6 @@ export namespace PutAccountSettingResponse { } export interface PutAccountSettingDefaultRequest { - /** - *The account setting value for the specified principal ARN. Accepted values are
- * enabled
and disabled
.
The resource name for which to modify the account setting. If
* serviceLongArnFormat
is specified, the ARN for your Amazon ECS services is
@@ -7294,6 +7366,12 @@ export interface PutAccountSettingDefaultRequest {
* setting for CloudWatch Container Insights for your clusters is affected.
The account setting value for the specified principal ARN. Accepted values are
+ * enabled
and disabled
.
The attributes to apply to your resource. You can specify up to 10 custom attributes - * per resource. You can specify up to 10 attributes in a single call.
- */ - attributes: Attribute[] | undefined; - /** *The short name or full Amazon Resource Name (ARN) of the cluster that contains the resource to apply * attributes. If you do not specify a cluster, the default cluster is assumed.
*/ cluster?: string; + + /** + *The attributes to apply to your resource. You can specify up to 10 custom attributes + * per resource. You can specify up to 10 attributes in a single call.
+ */ + attributes: Attribute[] | undefined; } export namespace PutAttributesRequest { @@ -7450,18 +7528,18 @@ export enum PlatformDeviceType { * type is a GPU. */ export interface PlatformDevice { - /** - *The type of device that is available on the container instance. The only supported
- * value is GPU
.
The ID for the GPU(s) on the container instance. The available GPU IDs can also be
* obtained on the container instance in the
* /var/lib/ecs/gpu/nvidia_gpu_info.json
file.
The type of device that is available on the container instance. The only supported
+ * value is GPU
.
The devices that are available on the container instance. The only supported device - * type is a GPU.
+ *The short name or full Amazon Resource Name (ARN) of the cluster with which to register your container + * instance. If you do not specify a cluster, the default cluster is assumed.
*/ - platformDevices?: PlatformDevice[]; + cluster?: string; /** *The instance identity document for the EC2 instance to register. This document can be @@ -7498,6 +7576,28 @@ export interface RegisterContainerInstanceRequest { */ totalResources?: Resource[]; + /** + *
The version information for the Amazon ECS container agent and Docker daemon running on the + * container instance.
+ */ + versionInfo?: VersionInfo; + + /** + *The ARN of the container instance (if it was previously registered).
+ */ + containerInstanceArn?: string; + + /** + *The container instance attributes that this container instance supports.
+ */ + attributes?: Attribute[]; + + /** + *The devices that are available on the container instance. The only supported device + * type is a GPU.
+ */ + platformDevices?: PlatformDevice[]; + /** *The metadata that you apply to the container instance to help you categorize and * organize them. Each tag consists of a key and an optional value, both of which you @@ -7535,28 +7635,6 @@ export interface RegisterContainerInstanceRequest { * */ tags?: Tag[]; - - /** - *
The ARN of the container instance (if it was previously registered).
- */ - containerInstanceArn?: string; - - /** - *The container instance attributes that this container instance supports.
- */ - attributes?: Attribute[]; - - /** - *The version information for the Amazon ECS container agent and Docker daemon running on the - * container instance.
- */ - versionInfo?: VersionInfo; - - /** - *The short name or full Amazon Resource Name (ARN) of the cluster with which to register your container - * instance. If you do not specify a cluster, the default cluster is assumed.
- */ - cluster?: string; } export namespace RegisterContainerInstanceRequest { @@ -7594,12 +7672,81 @@ export interface RegisterTaskDefinitionRequest { */ taskRoleArn?: string; + /** + *The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent + * permission to make AWS API calls on your behalf. The task execution IAM role is required + * depending on the requirements of your task. For more information, see Amazon ECS task + * execution IAM role in the Amazon Elastic Container Service Developer Guide.
+ */ + executionRoleArn?: string; + + /** + *The Docker networking mode to use for the containers in the task. The valid values are
+ * none
, bridge
, awsvpc
, and host
.
+ * If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required.
+ * For Amazon ECS tasks on Amazon EC2 instances, any network mode can be used. If the network
+ * mode is set to none
, you cannot specify port mappings in your container
+ * definitions, and the tasks containers do not have external connectivity. The
+ * host
and awsvpc
network modes offer the highest networking
+ * performance for containers because they use the EC2 network stack instead of the
+ * virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container
+ * ports are mapped directly to the corresponding host port (for the host
+ * network mode) or the attached elastic network interface port (for the
+ * awsvpc
network mode), so you cannot take advantage of dynamic host port
+ * mappings.
When using the host
network mode, you should not run
+ * containers using the root user (UID 0). It is considered best practice
+ * to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network
+ * interface, and you must specify a NetworkConfiguration value when you create
+ * a service or run a task with the task definition. For more information, see Task Networking in the
+ * Amazon Elastic Container Service Developer Guide.
Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with the
+ * ecs-init
package, or AWS Fargate infrastructure support the
+ * awsvpc
network mode.
If the network mode is host
, you cannot run multiple instantiations of the
+ * same task on a single container instance when port mappings are used.
Docker for Windows uses different network modes than Docker for Linux. When you
+ * register a task definition with Windows containers, you must not specify a network mode.
+ * If you use the console to register a task definition with Windows containers, you must
+ * choose the
network mode object.
For more information, see Network + * settings in the Docker run reference.
+ */ + networkMode?: NetworkMode | string; + + /** + *A list of container definitions in JSON format that describe the different containers + * that make up your task.
+ */ + containerDefinitions: ContainerDefinition[] | undefined; + /** *A list of volume definitions in JSON format that containers in your task may * use.
*/ volumes?: Volume[]; + /** + *An array of placement constraint objects to use for the task. You can specify a + * maximum of 10 constraints per task (this limit includes constraints in the task + * definition and those specified at runtime).
+ */ + placementConstraints?: TaskDefinitionPlacementConstraint[]; + + /** + *The task launch type that Amazon ECS should validate the task definition against. This
+ * ensures that the task definition parameters are compatible with the specified launch
+ * type. If no value is specified, it defaults to EC2
.
The number of CPU units used by the task. It can be expressed as an integer using CPU
* units, for example The process namespace to use for the containers in the task. The valid
- * values are If the This parameter is not supported for Windows containers or tasks using the Fargate launch type. The configuration details for the App Mesh proxy. For tasks using the EC2 launch type, the container instances require at
- * least version 1.26.0 of the container agent and at least version 1.26.0-1 of the
- * For tasks using the Fargate launch type, the task or service requires
- * platform version 1.3.0 or later. The task launch type that Amazon ECS should validate the task definition against. This
- * ensures that the task definition parameters are compatible with the specified launch
- * type. If no value is specified, it defaults to The amount of memory (in MiB) used by the task. It can be expressed as an integer
@@ -7712,53 +7818,61 @@ export interface RegisterTaskDefinitionRequest {
memory?: string;
/**
- * The Docker networking mode to use for the containers in the task. The valid values are
- * With the If the network mode is Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants with the
- * If the network mode is Docker for Windows uses different network modes than Docker for Linux. When you
- * register a task definition with Windows containers, you must not specify a network mode.
- * If you use the console to register a task definition with Windows containers, you must
- * choose the For more information, see Network
- * settings in the Docker run reference. An array of placement constraint objects to use for the task. You can specify a
- * maximum of 10 constraints per task (this limit includes constraints in the task
- * definition and those specified at runtime). The metadata that you apply to the task definition to help you categorize and organize
+ * them. Each tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only
+ * one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources,
+ * remember that other services may have restrictions on allowed characters.
+ * Generally allowed characters are: letters, numbers, and spaces representable in
+ * UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use A list of container definitions in JSON format that describe the different containers
- * that make up your task. The process namespace to use for the containers in the task. The valid
+ * values are If the This parameter is not supported for Windows containers or tasks using the Fargate launch type. The IPC resource namespace to use for the containers in the task. The valid values are
@@ -7797,54 +7911,21 @@ export interface RegisterTaskDefinitionRequest {
ipcMode?: IpcMode | string;
/**
- * The Elastic Inference accelerators to use for the containers in the task. The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent
- * permission to make AWS API calls on your behalf. The task execution IAM role is required
- * depending on the requirements of your task. For more information, see Amazon ECS task
- * execution IAM role in the Amazon Elastic Container Service Developer Guide. The configuration details for the App Mesh proxy. For tasks using the EC2 launch type, the container instances require at
+ * least version 1.26.0 of the container agent and at least version 1.26.0-1 of the
+ * The metadata that you apply to the task definition to help you categorize and organize
- * them. Each tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only
- * one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources,
- * remember that other services may have restrictions on allowed characters.
- * Generally allowed characters are: letters, numbers, and spaces representable in
- * UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use The Elastic Inference accelerators to use for the containers in the task. The list of tags associated with the task definition. The full description of the registered task definition. The full description of the registered task definition. The list of tags associated with the task definition. The platform version the task should run. A platform version is only specified for
- * tasks using the Fargate launch type. If one is not specified, the
- * The The launch type on which to run your task. For more information, see Amazon ECS
- * Launch Types in the Amazon Elastic Container Service Developer Guide. If a The capacity provider strategy to use for the task. A capacity provider strategy consists of one or more capacity providers along with the
@@ -7932,7 +7989,85 @@ export interface RunTaskRequest {
* The PutClusterCapacityProviders API operation is used to update the
* list of available capacity providers for a cluster after the cluster is created. The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task.
+ * If you do not specify a cluster, the default cluster is assumed. The number of instantiations of the specified task to place on your cluster. You can
+ * specify up to 10 tasks per call. Specifies whether to enable Amazon ECS managed tags for the task. For more information, see
+ * Tagging Your Amazon ECS
+ * Resources in the Amazon Elastic Container Service Developer Guide. The name of the task group to associate with the task. The default value is the family
+ * name of the task definition (for example, family:my-family-name). The launch type on which to run your task. For more information, see Amazon ECS
+ * Launch Types in the Amazon Elastic Container Service Developer Guide. If a The network configuration for the task. This parameter is required for task
+ * definitions that use the A list of container overrides in JSON format that specify the name of a container in
+ * the specified task definition and the overrides it should receive. You can override the
+ * default command for a container (that is specified in the task definition or Docker
+ * image) with a A total of 8192 characters are allowed for overrides. This limit includes the JSON
+ * formatting characters of the override structure. An array of placement constraint objects to use for the task. You can specify up to 10
+ * constraints per task (including constraints in the task definition and those specified
+ * at runtime). The placement strategy objects to use for the task. You can specify a maximum of five
+ * strategy rules per task. The platform version the task should run. A platform version is only specified for
+ * tasks using the Fargate launch type. If one is not specified, the
+ * Specifies whether to propagate the tags from the task definition to the task. If no
@@ -7946,30 +8081,21 @@ export interface RunTaskRequest {
propagateTags?: PropagateTags | string;
/**
- * The placement strategy objects to use for the task. You can specify a maximum of five
- * strategy rules per task. The name of the task group to associate with the task. The default value is the family
- * name of the task definition (for example, family:my-family-name). Specifies whether to enable Amazon ECS managed tags for the task. For more information, see
- * Tagging Your Amazon ECS
- * Resources in the Amazon Elastic Container Service Developer Guide. The reference ID to use for the task. An array of placement constraint objects to use for the task. You can specify up to 10
- * constraints per task (including constraints in the task definition and those specified
- * at runtime). An optional tag specified when a task is started. For example, if you automatically
+ * trigger a task to run a batch process job, you could apply a unique identifier for that
+ * job to your task with the If a task is started by an Amazon ECS service, then the The metadata that you apply to the task to help you categorize and organize them. Each
@@ -8009,56 +8135,11 @@ export interface RunTaskRequest {
tags?: Tag[];
/**
- * The number of instantiations of the specified task to place on your cluster. You can
- * specify up to 10 tasks per call. A list of container overrides in JSON format that specify the name of a container in
- * the specified task definition and the overrides it should receive. You can override the
- * default command for a container (that is specified in the task definition or Docker
- * image) with a A total of 8192 characters are allowed for overrides. This limit includes the JSON
- * formatting characters of the override structure. The reference ID to use for the task. An optional tag specified when a task is started. For example, if you automatically
- * trigger a task to run a batch process job, you could apply a unique identifier for that
- * job to your task with the If a task is started by an Amazon ECS service, then the The network configuration for the task. This parameter is required for task
- * definitions that use the The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task.
- * If you do not specify a cluster, the default cluster is assumed. The Any failures associated with the call. A full description of the tasks that were run. The tasks that were successfully placed
* on your cluster are described here. Any failures associated with the call. Specifies whether to propagate the tags from the task definition or the service to the
- * task. If no value is specified, the tags are not propagated. The short name or full Amazon Resource Name (ARN) of the cluster on which to start your task.
+ * If you do not specify a cluster, the default cluster is assumed. An optional tag specified when a task is started. For example, if you automatically
- * trigger a task to run a batch process job, you could apply a unique identifier for that
- * job to your task with the If a task is started by an Amazon ECS service, then the The container instance IDs or full ARN entries for the container instances on which
+ * you would like to place your task. You can specify up to 10 container instances. Specifies whether to enable Amazon ECS managed tags for the task. For more information, see
+ * Tagging Your Amazon ECS
+ * Resources in the Amazon Elastic Container Service Developer Guide. The name of the task group to associate with the task. The default value is the family
+ * name of the task definition (for example, family:my-family-name). The VPC subnet and security group configuration for tasks that receive their own
+ * elastic network interface by using the A list of container overrides in JSON format that specify the name of a container in
@@ -8120,16 +8214,27 @@ export interface StartTaskRequest {
overrides?: TaskOverride;
/**
- * The container instance IDs or full ARN entries for the container instances on which
- * you would like to place your task. You can specify up to 10 container instances. Specifies whether to propagate the tags from the task definition or the service to the
+ * task. If no value is specified, the tags are not propagated. The name of the task group to associate with the task. The default value is the family
- * name of the task definition (for example, family:my-family-name). The reference ID to use for the task. An optional tag specified when a task is started. For example, if you automatically
+ * trigger a task to run a batch process job, you could apply a unique identifier for that
+ * job to your task with the If a task is started by an Amazon ECS service, then the The metadata that you apply to the task to help you categorize and organize them. Each
@@ -8168,36 +8273,12 @@ export interface StartTaskRequest {
*/
tags?: Tag[];
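// Illustrative sketch (not part of this diff): StartTaskRequest differs from RunTask in
// that the caller picks the container instances explicitly (up to 10). The cluster,
// task definition, and container instance ARN below are placeholder assumptions.
import { ECSClient, StartTaskCommand } from "@aws-sdk/client-ecs";

async function startOnSpecificInstances(ecs: ECSClient) {
  return ecs.send(
    new StartTaskCommand({
      cluster: "default",
      taskDefinition: "my-family", // latest ACTIVE revision is used when none is given
      containerInstances: [
        "arn:aws:ecs:us-east-1:111122223333:container-instance/default/0123456789abcdef0",
      ],
      group: "family:my-family",
      referenceId: "job-42",
      startedBy: "batch-job-1234",
      enableECSManagedTags: true,
      propagateTags: "TASK_DEFINITION",
    })
  );
}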
- /**
- * The reference ID to use for the task. The Specifies whether to enable Amazon ECS managed tags for the task. For more information, see
- * Tagging Your Amazon ECS
- * Resources in the Amazon Elastic Container Service Developer Guide. The short name or full Amazon Resource Name (ARN) of the cluster on which to start your task.
- * If you do not specify a cluster, the default cluster is assumed. The VPC subnet and security group configuration for tasks that receive their own
- * elastic network interface by using the Any failures associated with the call. A full description of the tasks that were started. Each task that was successfully
* placed on your container instances is described. Any failures associated with the call. The status of the attachment. The Amazon Resource Name (ARN) of the attachment. The Amazon Resource Name (ARN) of the attachment. The status of the attachment. Any attachments associated with the state change request. The short name or full ARN of the cluster that hosts the container instance the
* attachment belongs to. Any attachments associated with the state change request. The task ID or full Amazon Resource Name (ARN) of the task that hosts the container. The short name or full ARN of the cluster that hosts the container. The exit code returned for the state change request. The task ID or full Amazon Resource Name (ARN) of the task that hosts the container. The status of the state change request. The name of the container. The ID of the Docker container. The network bindings of the container. The status of the state change request. The name of the container. The exit code returned for the state change request. The reason for the state change request. The network bindings of the container. The exit code for the container, if the state change is a result of the container
- * exiting. The name of the container. The ID of the Docker container. The container image SHA 256 digest. The status of the container. The ID of the Docker container. The container image SHA 256 digest. The exit code for the container, if the state change is a result of the container
+ * exiting. Any network bindings associated with the container. The name of the container. The status of the container. Any containers associated with the state change request. The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task. The Unix timestamp for when the container image pull began. The task ID or full ARN of the task in the state change request. The reason for the state change request. The status of the state change request. The Unix timestamp for when the container image pull completed. The reason for the state change request. The task ID or full ARN of the task in the state change request. Any containers associated with the state change request. The status of the state change request. Any attachments associated with the state change request. The Unix timestamp for when the task execution stopped. The Unix timestamp for when the container image pull began. The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task. The Unix timestamp for when the container image pull completed. Any attachments associated with the state change request. The Unix timestamp for when the task execution stopped. The keys of the tags to be removed. The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported
* resources are Amazon ECS capacity providers, tasks, services, task definitions, clusters, and
   *             container instances.</p>
   * <p>The keys of the tags to be removed.</p>
   * <p>The details of the Auto Scaling group capacity provider to update.</p>
   * <p>The managed scaling settings for the Auto Scaling group capacity provider.</p>
   * <p>When managed scaling is enabled, Amazon ECS manages the scale-in and scale-out actions of
+   * the Auto Scaling group. Amazon ECS manages a target tracking scaling policy using an
+   * Amazon ECS-managed CloudWatch metric with the specified <code>targetCapacity</code> value as the
+   * target value for the metric. For more information, see Using Managed Scaling in the Amazon Elastic Container Service Developer Guide.</p>
+   * <p>If managed scaling is disabled, the user must manage the scaling of the Auto Scaling
+   * group.</p>
+   * <p>The managed termination protection setting to use for the Auto Scaling group capacity
+   * provider. This determines whether the Auto Scaling group has managed termination
+   * protection.</p>
+   * <important>
+   * <p>When using managed termination protection, managed scaling must also be used,
+   * otherwise managed termination protection will not work.</p>
+   * </important>
+   * <p>When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in
+   * an Auto Scaling group that contain tasks from being terminated during a scale-in action.
+   * The Auto Scaling group and each instance in the Auto Scaling group must have instance
+   * protection from scale-in actions enabled as well. For more information, see Instance Protection in the AWS Auto Scaling User Guide.</p>
+   * <p>When managed termination protection is disabled, your Amazon EC2 instances are not
+   * protected from termination when the Auto Scaling group scales in.</p>
+   * <p>An object representing the parameters to update for the Auto Scaling group capacity
+   * provider.</p>
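// Illustrative sketch (not part of this diff): updating the managed scaling and managed
// termination protection settings described above. The capacity provider name and target
// capacity are placeholder assumptions; managed termination protection only works when
// managed scaling is also enabled.
import { ECSClient, UpdateCapacityProviderCommand } from "@aws-sdk/client-ecs";

async function enableManagedScaling(ecs: ECSClient) {
  return ecs.send(
    new UpdateCapacityProviderCommand({
      name: "my-asg-capacity-provider",
      autoScalingGroupProvider: {
        managedScaling: {
          status: "ENABLED",
          targetCapacity: 90, // target value for the ECS-managed target tracking policy
          minimumScalingStepSize: 1,
          maximumScalingStepSize: 10,
        },
        managedTerminationProtection: "ENABLED",
      },
    })
  );
}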
+   * <p>The name of the capacity provider to update.</p>
   * <p>The details of a capacity provider.</p>
   * <p>The name of the cluster to modify the settings for.</p>
   * <p>The container instance ID or full ARN entries for the container instance on which
- * you would like to update the Amazon ECS container agent. The short name or full Amazon Resource Name (ARN) of the cluster that your container instance is
* running on. If you do not specify a cluster, the default cluster is assumed. The container instance ID or full ARN entries for the container instance on which
+ * you would like to update the Amazon ECS container agent. A list of container instance IDs or full ARN entries. The container instance state with which to update the container instance. The only
* valid values for this action are A list of container instance IDs or full ARN entries. Any failures associated with the call. The list of container instances. The list of container instances. Any failures associated with the call. The short name or full Amazon Resource Name (ARN) of the cluster that your service is running on.
+ * If you do not specify a cluster, the default cluster is assumed. The name of the service to update. The number of instantiations of the task to place and keep running in your
+ * service. The The capacity provider strategy to update the service to use. If the service is using the default capacity provider strategy for the cluster, the
@@ -8773,16 +8951,27 @@ export interface UpdateServiceRequest {
*/
capacityProviderStrategy?: CapacityProviderStrategyItem[];
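// Illustrative sketch (not part of this diff): updating a service's capacity provider
// strategy. Per the surrounding docs, a launch type and a capacity provider strategy are
// mutually exclusive; the provider names, weights, and base value are placeholder
// assumptions, and a new deployment is typically forced so the change rolls out.
import { ECSClient, UpdateServiceCommand } from "@aws-sdk/client-ecs";

async function shiftToSpotCapacity(ecs: ECSClient) {
  return ecs.send(
    new UpdateServiceCommand({
      cluster: "default",
      service: "web",
      forceNewDeployment: true,
      capacityProviderStrategy: [
        { capacityProvider: "FARGATE", weight: 1, base: 2 },
        { capacityProvider: "FARGATE_SPOT", weight: 3 },
      ],
    })
  );
}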
+ /**
+ * Optional deployment parameters that control how many tasks run during the deployment
+   * and the ordering of stopping and starting tasks.</p>
   * <p>An object representing the network configuration for a task or service.</p>
   * <p>The short name or full Amazon Resource Name (ARN) of the cluster that your service is running on.
-   * If you do not specify a cluster, the default cluster is assumed.</p>
+   * <p>An array of task placement constraint objects to update the service to use. If no
+   * value is specified, the existing placement constraints for the service will remain
+   * unchanged. If this value is specified, it will override any existing placement
+   * constraints defined for the service. To remove all existing placement constraints,
+   * specify an empty array.</p>
+   * <p>You can specify a maximum of 10 constraints per task (this limit includes constraints
+   * in the task definition and those specified at runtime).</p>
+   * <p>The task placement strategy objects to update the service to use. If no value is
@@ -8793,38 +8982,6 @@ export interface UpdateServiceRequest {
*/
placementStrategy?: PlacementStrategy[];
- /**
- * The number of instantiations of the task to place and keep running in your
- * service. The name of the service to update. The period of time, in seconds, that the Amazon ECS service scheduler should ignore
- * unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid
- * if your service is configured to use a load balancer. If your service's tasks take a
- * while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace
- * period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service
- * scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS
- * service scheduler from marking tasks as unhealthy and stopping them before they have
- * time to come up. The The platform version on which your tasks in the service are running. A platform
* version is only specified for tasks using the Fargate launch type. If a
@@ -8844,21 +9001,16 @@ export interface UpdateServiceRequest {
forceNewDeployment?: boolean;
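// Illustrative sketch (not part of this diff): an UpdateService call exercising the
// deployment, platform version, and health check grace period parameters documented in
// this section. Cluster, service, and task definition names are placeholder assumptions.
import { ECSClient, UpdateServiceCommand } from "@aws-sdk/client-ecs";

async function rollOutNewRevision(ecs: ECSClient) {
  return ecs.send(
    new UpdateServiceCommand({
      cluster: "default",
      service: "web",
      taskDefinition: "web:42", // family:revision; latest ACTIVE revision if omitted
      desiredCount: 4,
      deploymentConfiguration: {
        maximumPercent: 200,
        minimumHealthyPercent: 100,
      },
      healthCheckGracePeriodSeconds: 120,
      forceNewDeployment: false,
      platformVersion: "LATEST",
    })
  );
}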
/**
- * An array of task placement constraint objects to update the service to use. If no
- * value is specified, the existing placement constraints for the service will remain
- * unchanged. If this value is specified, it will override any existing placement
- * constraints defined for the service. To remove all existing placement constraints,
- * specify an empty array. You can specify a maximum of 10 constraints per task (this limit includes constraints
- * in the task definition and those specified at runtime). Optional deployment parameters that control how many tasks run during the deployment
- * and the ordering of stopping and starting tasks. The period of time, in seconds, that the Amazon ECS service scheduler should ignore
+ * unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid
+ * if your service is configured to use a load balancer. If your service's tasks take a
+ * while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace
+ * period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service
+ * scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS
+ * service scheduler from marking tasks as unhealthy and stopping them before they have
+ * time to come up. The short name or full Amazon Resource Name (ARN) of the service that the task set exists in. The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task
* set exists in. The short name or full Amazon Resource Name (ARN) of the service that the task set exists in. The short name or full Amazon Resource Name (ARN) of the task set to set as the primary task set in the
* deployment. A floating-point percentage of the desired number of tasks to place and keep running
- * in the task set. The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task
+ * set exists in. The short name or full Amazon Resource Name (ARN) of the task set to update. The short name or full Amazon Resource Name (ARN) of the service that the task set exists in. The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task
- * set exists in. The short name or full Amazon Resource Name (ARN) of the task set to update. The short name or full Amazon Resource Name (ARN) of the service that the task set exists in. A floating-point percentage of the desired number of tasks to place and keep running
+ * in the task set. This specifies the name of the environment with the in-progress update that you want to
+ * This specifies the ID of the environment with the in-progress update that you want to
* cancel. This specifies the ID of the environment with the in-progress update that you want to
+ * This specifies the name of the environment with the in-progress update that you want to
* cancel.1024
, or as a string using vCPUs, for example 1
@@ -7634,48 +7781,7 @@ export interface RegisterTaskDefinitionRequest {
*
*
*/
- cpu?: string;
-
- /**
- *
host
or task
. If host
- * is specified, then all containers within the tasks that specified the
- * host
PID mode on the same container instance share the
- * same process namespace with the host Amazon EC2 instance. If task
is
- * specified, all containers within the specified task share the same
- * process namespace. If no value is specified, the default is a private
- * namespace. For more information, see PID settings in the Docker run
- * reference.host
PID mode is used, be aware that there is a
- * heightened risk of undesired process namespace expose. For more
- * information, see Docker
- * security.ecs-init
package to enable a proxy configuration. If your container
- * instances are launched from the Amazon ECS-optimized AMI version 20190301
or
- * later, then they contain the required versions of the container agent and
- * ecs-init
. For more information, see Amazon ECS-optimized Linux AMI
- * in the Amazon Elastic Container Service Developer Guide.EC2
.none
, bridge
, awsvpc
, and host
.
- * The default Docker network mode is bridge
. If you are using the
- * Fargate launch type, the awsvpc
network mode is required. If
- * you are using the EC2 launch type, any network mode can be used. If the network
- * mode is set to none
, you cannot specify port mappings in your container
- * definitions, and the tasks containers do not have external connectivity. The
- * host
and awsvpc
network modes offer the highest networking
- * performance for containers because they use the EC2 network stack instead of the
- * virtualized network stack provided by the bridge
mode.host
and awsvpc
network modes, exposed container
- * ports are mapped directly to the corresponding host port (for the host
- * network mode) or the attached elastic network interface port (for the
- * awsvpc
network mode), so you cannot take advantage of dynamic host port
- * mappings. awsvpc
, the task is allocated an elastic network
- * interface, and you must specify a NetworkConfiguration value when you create
- * a service or run a task with the task definition. For more information, see Task Networking in the
- * Amazon Elastic Container Service Developer Guide.ecs-init
package, or AWS Fargate infrastructure support the
- * awsvpc
network mode. host
, you cannot run multiple instantiations of the
- * same task on a single container instance when port mappings are used.
network mode object.
+ *
*/
- placementConstraints?: TaskDefinitionPlacementConstraint[];
+ tags?: Tag[];
/**
- * aws:
, AWS:
, or any upper or lowercase
+ * combination of such as a prefix for either keys or values as it is reserved for
+ * AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with
+ * this prefix do not count against your tags per resource limit.host
or task
. If host
+ * is specified, then all containers within the tasks that specified the
+ * host
PID mode on the same container instance share the
+ * same process namespace with the host Amazon EC2 instance. If task
is
+ * specified, all containers within the specified task share the same
+ * process namespace. If no value is specified, the default is a private
+ * namespace. For more information, see PID settings in the Docker run
+ * reference.host
PID mode is used, be aware that there is a
+ * heightened risk of undesired process namespace expose. For more
+ * information, see Docker
+ * security.ecs-init
package to enable a proxy configuration. If your container
+ * instances are launched from the Amazon ECS-optimized AMI version 20190301
or
+ * later, then they contain the required versions of the container agent and
+ * ecs-init
. For more information, see Amazon ECS-optimized Linux AMI
+ *
- *
+ * aws:
, AWS:
, or any upper or lowercase
- * combination of such as a prefix for either keys or values as it is reserved for
- * AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with
- * this prefix do not count against your tags per resource limit.LATEST
platform version is used by default. For more information, see
- * AWS Fargate Platform
- * Versions in the Amazon Elastic Container Service Developer Guide.family
and revision
(family:revision
) or
- * full ARN of the task definition to run. If a revision
is not specified,
- * the latest ACTIVE
revision is used.launchType
is specified, the capacityProviderStrategy
- * parameter must be omitted.launchType
is specified, the capacityProviderStrategy
+ * parameter must be omitted.awsvpc
network mode to receive their own elastic
+ * network interface, and it is not supported for other network modes. For more
+ * information, see Task Networking
+ * in the Amazon Elastic Container Service Developer Guide.command
override. You can also override existing environment
+ * variables (that are specified in the task definition or Docker image) on a container or
+ * add new environment variables to it with an environment
override.LATEST
platform version is used by default. For more information, see
+ * AWS Fargate Platform
+ * Versions in the Amazon Elastic Container Service Developer Guide.startedBy
parameter. You can then identify which
+ * tasks belong to that job by filtering the results of a ListTasks call
+ * with the startedBy
value. Up to 36 letters (uppercase and lowercase),
+ * numbers, hyphens, and underscores are allowed.startedBy
parameter
+ * contains the deployment ID of the service that starts it.command
override. You can also override existing environment
- * variables (that are specified in the task definition or Docker image) on a container or
- * add new environment variables to it with an environment
override.startedBy
parameter. You can then identify which
- * tasks belong to that job by filtering the results of a ListTasks call
- * with the startedBy
value. Up to 36 letters (uppercase and lowercase),
- * numbers, hyphens, and underscores are allowed.startedBy
parameter
- * contains the deployment ID of the service that starts it.awsvpc
network mode to receive their own elastic
- * network interface, and it is not supported for other network modes. For more
- * information, see Task Networking
- * in the Amazon Elastic Container Service Developer Guide.family
and revision
(family:revision
) or
+ * full ARN of the task definition to run. If a revision
is not specified,
+ * the latest ACTIVE
revision is used.startedBy
parameter. You can then identify which
- * tasks belong to that job by filtering the results of a ListTasks call
- * with the startedBy
value. Up to 36 letters (uppercase and lowercase),
- * numbers, hyphens, and underscores are allowed.startedBy
parameter
- * contains the deployment ID of the service that starts it.awsvpc
networking mode.startedBy
parameter. You can then identify which
+ * tasks belong to that job by filtering the results of a ListTasks call
+ * with the startedBy
value. Up to 36 letters (uppercase and lowercase),
+ * numbers, hyphens, and underscores are allowed.startedBy
parameter
+ * contains the deployment ID of the service that starts it.family
and revision
(family:revision
) or
* full ARN of the task definition to start. If a revision
is not specified,
* the latest ACTIVE
revision is used.awsvpc
networking mode.targetCapacity
value as the
+ * target value for the metric. For more information, see Using Managed Scaling in the Amazon Elastic Container Service Developer Guide.ACTIVE
and DRAINING
. A
@@ -8716,11 +8873,6 @@ export interface UpdateContainerInstancesStateRequest {
* will be unable to update the container instance state.family
and revision
(family:revision
) or
+ * full ARN of the task definition to run in your service. If a revision
is
+ * not specified, the latest ACTIVE
revision is used. If you modify the task
+ * definition with UpdateService
, Amazon ECS spawns a task with the new version of
+ * the task definition and then stops an old task after the new version is running.family
and revision
(family:revision
) or
- * full ARN of the task definition to run in your service. If a revision
is
- * not specified, the latest ACTIVE
revision is used. If you modify the task
- * definition with UpdateService
, Amazon ECS spawns a task with the new version of
- * the task definition and then stops an old task after the new version is running.
Specify true
to apply the rule, or false
to disable
- * it.
Specify the maximum number of application versions to retain.
*/ @@ -99,6 +93,12 @@ export interface MaxCountRule { * Elastic Beanstalk deletes the application version. */ DeleteSourceFromS3?: boolean; + + /** + *Specify true
to apply the rule, or false
to disable
+ * it.
Specify a max count rule to restrict the number of application versions that are + *
Specify a max age rule to restrict the length of time that application versions are * retained for an application.
*/ - MaxCountRule?: MaxCountRule; + MaxAgeRule?: MaxAgeRule; /** - *Specify a max age rule to restrict the length of time that application versions are + *
Specify a max count rule to restrict the number of application versions that are * retained for an application.
*/ - MaxAgeRule?: MaxAgeRule; + MaxCountRule?: MaxCountRule; } export namespace ApplicationVersionLifecycleConfig { @@ -142,6 +142,11 @@ export namespace ApplicationVersionLifecycleConfig { * settings for application versions. */ export interface ApplicationResourceLifecycleConfig { + /** + *Defines lifecycle settings for application versions.
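// Illustrative sketch (not part of this diff): wiring the MaxAgeRule/MaxCountRule types
// above into an application's resource lifecycle config. The application name, role ARN,
// and limits are placeholder assumptions; normally only one of the two rules is enabled
// at a time.
import {
  ElasticBeanstalkClient,
  UpdateApplicationResourceLifecycleCommand,
} from "@aws-sdk/client-elastic-beanstalk";

async function limitRetainedVersions(eb: ElasticBeanstalkClient) {
  return eb.send(
    new UpdateApplicationResourceLifecycleCommand({
      ApplicationName: "my-app",
      ResourceLifecycleConfig: {
        ServiceRole: "arn:aws:iam::111122223333:role/aws-elasticbeanstalk-service-role",
        VersionLifecycleConfig: {
          MaxCountRule: {
            Enabled: true,
            MaxCount: 50,
            DeleteSourceFromS3: true,
          },
          MaxAgeRule: { Enabled: false },
        },
      },
    })
  );
}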
+ */ + VersionLifecycleConfig?: ApplicationVersionLifecycleConfig; + /** *The ARN of an IAM service role that Elastic Beanstalk has permission to * assume.
@@ -154,11 +159,6 @@ export interface ApplicationResourceLifecycleConfig { * subsequent calls to change the Service Role to another value. */ ServiceRole?: string; - - /** - *Defines lifecycle settings for application versions.
- */ - VersionLifecycleConfig?: ApplicationVersionLifecycleConfig; } export namespace ApplicationResourceLifecycleConfig { @@ -172,44 +172,44 @@ export namespace ApplicationResourceLifecycleConfig { */ export interface ApplicationDescription { /** - *The date when the application was last modified.
+ *The Amazon Resource Name (ARN) of the application.
*/ - DateUpdated?: Date; + ApplicationArn?: string; /** - *The name of the application.
+ *User-defined description of the application.
*/ - ApplicationName?: string; + Description?: string; /** - *The names of the versions for this application.
+ *The names of the configuration templates associated with this application.
*/ - Versions?: string[]; + ConfigurationTemplates?: string[]; /** - *The lifecycle settings for the application.
+ *The date when the application was created.
*/ - ResourceLifecycleConfig?: ApplicationResourceLifecycleConfig; + DateCreated?: Date; /** - *User-defined description of the application.
+ *The names of the versions for this application.
*/ - Description?: string; + Versions?: string[]; /** - *The Amazon Resource Name (ARN) of the application.
+ *The name of the application.
*/ - ApplicationArn?: string; + ApplicationName?: string; /** - *The names of the configuration templates associated with this application.
+ *The date when the application was last modified.
*/ - ConfigurationTemplates?: string[]; + DateUpdated?: Date; /** - *The date when the application was created.
+ *The lifecycle settings for the application.
*/ - DateCreated?: Date; + ResourceLifecycleConfig?: ApplicationResourceLifecycleConfig; } export namespace ApplicationDescription { @@ -256,34 +256,34 @@ export namespace ApplicationDescriptionsMessage { */ export interface Latency { /** - *The average latency for the slowest 50 percent of requests over the last 10 + *
The average latency for the slowest 1 percent of requests over the last 10 * seconds.
*/ - P50?: number; + P99?: number; /** - *The average latency for the slowest 5 percent of requests over the last 10 + *
The average latency for the slowest 10 percent of requests over the last 10 * seconds.
*/ - P95?: number; + P90?: number; /** - *The average latency for the slowest 15 percent of requests over the last 10 + *
The average latency for the slowest 50 percent of requests over the last 10 * seconds.
*/ - P85?: number; + P50?: number; /** - *The average latency for the slowest 25 percent of requests over the last 10 + *
The average latency for the slowest 90 percent of requests over the last 10 * seconds.
*/ - P75?: number; + P10?: number; /** - *The average latency for the slowest 10 percent of requests over the last 10 + *
The average latency for the slowest 25 percent of requests over the last 10 * seconds.
*/ - P90?: number; + P75?: number; /** *The average latency for the slowest 0.1 percent of requests over the last 10 @@ -292,16 +292,16 @@ export interface Latency { P999?: number; /** - *
The average latency for the slowest 90 percent of requests over the last 10 + *
The average latency for the slowest 15 percent of requests over the last 10 * seconds.
*/ - P10?: number; + P85?: number; /** - *The average latency for the slowest 1 percent of requests over the last 10 + *
The average latency for the slowest 5 percent of requests over the last 10 * seconds.
*/ - P99?: number; + P95?: number; } export namespace Latency { @@ -316,12 +316,6 @@ export namespace Latency { * Definitions. */ export interface StatusCodes { - /** - *The percentage of requests over the last 10 seconds that resulted in a 3xx (300, 301, - * etc.) status code.
- */ - Status3xx?: number; - /** *The percentage of requests over the last 10 seconds that resulted in a 4xx (400, 401, * etc.) status code.
@@ -339,6 +333,12 @@ export interface StatusCodes { * etc.) status code. */ Status2xx?: number; + + /** + *The percentage of requests over the last 10 seconds that resulted in a 3xx (300, 301, + * etc.) status code.
+ */ + Status3xx?: number; } export namespace StatusCodes { @@ -351,12 +351,6 @@ export namespace StatusCodes { *Application request metrics for an AWS Elastic Beanstalk environment.
*/ export interface ApplicationMetrics { - /** - *Represents the average latency for the slowest X percent of requests over the last 10 - * seconds. Latencies are in seconds with one millisecond resolution.
- */ - Latency?: Latency; - /** *Average number of requests handled by the web server per second over the last 10 * seconds.
@@ -364,10 +358,10 @@ export interface ApplicationMetrics { RequestCount?: number; /** - *Represents the percentage of requests over the last 10 seconds that resulted in each - * type of status code response.
+ *Represents the average latency for the slowest X percent of requests over the last 10 + * seconds. Latencies are in seconds with one millisecond resolution.
*/ - StatusCodes?: StatusCodes; + Latency?: Latency; /** *The amount of time that the metrics cover (usually 10 seconds). For example, you might
@@ -375,6 +369,12 @@ export interface ApplicationMetrics {
* (duration
).
Represents the percentage of requests over the last 10 seconds that resulted in each + * type of status code response.
+ */ + StatusCodes?: StatusCodes; } export namespace ApplicationMetrics { @@ -385,14 +385,14 @@ export namespace ApplicationMetrics { export interface ApplicationResourceLifecycleDescriptionMessage { /** - *The name of the application.
+ *The lifecycle configuration.
*/ - ApplicationName?: string; + ResourceLifecycleConfig?: ApplicationResourceLifecycleConfig; /** - *The lifecycle configuration.
+ *The name of the application.
*/ - ResourceLifecycleConfig?: ApplicationResourceLifecycleConfig; + ApplicationName?: string; } export namespace ApplicationResourceLifecycleDescriptionMessage { @@ -409,6 +409,23 @@ export type SourceType = "Git" | "Zip"; *Location of the source code for an application version.
*/ export interface SourceBuildInformation { + /** + *The type of repository.
+ *
+ * Git
+ *
+ * Zip
+ *
The location of the source code, as a formatted string, depending on the value of SourceRepository
*
The type of repository.
- *
- * Git
- *
- * Zip
- *
The Amazon S3 bucket where the data is located.
+ *The Amazon S3 key where the data is located.
*/ - S3Bucket?: string; + S3Key?: string; /** - *The Amazon S3 key where the data is located.
+ *The Amazon S3 bucket where the data is located.
*/ - S3Key?: string; + S3Bucket?: string; } export namespace S3Location { @@ -498,19 +498,14 @@ export type ApplicationVersionStatus = "Building" | "Failed" | "Processed" | "Pr */ export interface ApplicationVersionDescription { /** - *The name of the application to which the application version belongs.
- */ - ApplicationName?: string; - - /** - *The storage location of the application version's source bundle in Amazon S3.
+ *Reference to the artifact from the AWS CodeBuild build.
*/ - SourceBundle?: S3Location; + BuildArn?: string; /** - *The last modified date of the application version.
+ *The description of the application version.
*/ - DateUpdated?: Date; + Description?: string; /** *If the version's source code was retrieved from AWS CodeCommit, the location of the @@ -519,9 +514,19 @@ export interface ApplicationVersionDescription { SourceBuildInformation?: SourceBuildInformation; /** - *
A unique identifier for the application version.
+ *The creation date of the application version.
*/ - VersionLabel?: string; + DateCreated?: Date; + + /** + *The storage location of the application version's source bundle in Amazon S3.
+ */ + SourceBundle?: S3Location; + + /** + *The name of the application to which the application version belongs.
+ */ + ApplicationName?: string; /** *The processing status of the application version. Reflects the state of the application @@ -558,24 +563,19 @@ export interface ApplicationVersionDescription { Status?: ApplicationVersionStatus | string; /** - *
The description of the application version.
- */ - Description?: string; - - /** - *Reference to the artifact from the AWS CodeBuild build.
+ *The Amazon Resource Name (ARN) of the application version.
*/ - BuildArn?: string; + ApplicationVersionArn?: string; /** - *The Amazon Resource Name (ARN) of the application version.
+ *A unique identifier for the application version.
*/ - ApplicationVersionArn?: string; + VersionLabel?: string; /** - *The creation date of the application version.
+ *The last modified date of the application version.
*/ - DateCreated?: Date; + DateUpdated?: Date; } export namespace ApplicationVersionDescription { @@ -605,17 +605,17 @@ export namespace ApplicationVersionDescriptionMessage { *Result message wrapping a list of application version descriptions.
*/ export interface ApplicationVersionDescriptionsMessage { - /** - *List of ApplicationVersionDescription
objects sorted in order of
- * creation.
In a paginated request, the token that you can pass in a subsequent request to get the * next response page.
*/ NextToken?: string; + + /** + *List of ApplicationVersionDescription
objects sorted in order of
+ * creation.
A description of the managed action.
+ *The type of managed action.
*/ - ActionDescription?: string; + ActionType?: ActionType | string; /** - *The status of the managed action.
+ *A description of the managed action.
*/ - Status?: string; + ActionDescription?: string; /** *The action ID of the managed action.
@@ -670,9 +670,9 @@ export interface ApplyEnvironmentManagedActionResult { ActionId?: string; /** - *The type of managed action.
+ *The status of the managed action.
*/ - ActionType?: ActionType | string; + Status?: string; } export namespace ApplyEnvironmentManagedActionResult { @@ -722,15 +722,15 @@ export namespace ManagedActionInvalidStateException { */ export interface AssociateEnvironmentOperationsRoleMessage { /** - *The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's - * operations role.
+ *The name of the environment to which to set the operations role.
*/ - OperationsRole: string | undefined; + EnvironmentName: string | undefined; /** - *The name of the environment to which to set the operations role.
+ *The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's + * operations role.
*/ - EnvironmentName: string | undefined; + OperationsRole: string | undefined; } export namespace AssociateEnvironmentOperationsRoleMessage { @@ -760,14 +760,14 @@ export namespace AutoScalingGroup { */ export interface SolutionStackDescription { /** - *The name of the solution stack.
+ *The permitted file types allowed for a solution stack.
*/ - SolutionStackName?: string; + PermittedFileTypes?: string[]; /** - *The permitted file types allowed for a solution stack.
+ *The name of the solution stack.
*/ - PermittedFileTypes?: string[]; + SolutionStackName?: string; } export namespace SolutionStackDescription { @@ -796,6 +796,12 @@ export namespace CheckDNSAvailabilityMessage { *Indicates if the specified CNAME is available.
*/ export interface CheckDNSAvailabilityResultMessage { + /** + *The fully qualified CNAME to reserve when CreateEnvironment is called + * with the provided prefix.
+ */ + FullyQualifiedCNAME?: string; + /** *Indicates if the specified CNAME is available:
*The fully qualified CNAME to reserve when CreateEnvironment is called - * with the provided prefix.
- */ - FullyQualifiedCNAME?: string; } export namespace CheckDNSAvailabilityResultMessage { @@ -829,12 +829,9 @@ export namespace CheckDNSAvailabilityResultMessage { */ export interface ComposeEnvironmentsMessage { /** - *A list of version labels, specifying one or more application source bundles that belong - * to the target application. Each source bundle must include an environment manifest that - * specifies the name of the environment and the name of the solution stack to use, and - * optionally can specify environment links to create.
+ *The name of the application to which the specified source bundles belong.
*/ - VersionLabels?: string[]; + ApplicationName?: string; /** *The name of the group to which the target environments belong. Specify a group name @@ -845,9 +842,12 @@ export interface ComposeEnvironmentsMessage { GroupName?: string; /** - *
The name of the application to which the specified source bundles belong.
+ *A list of version labels, specifying one or more application source bundles that belong + * to the target application. Each source bundle must include an environment manifest that + * specifies the name of the environment and the name of the solution stack to use, and + * optionally can specify environment links to create.
*/ - ApplicationName?: string; + VersionLabels?: string[]; } export namespace ComposeEnvironmentsMessage { @@ -919,9 +919,9 @@ export namespace Listener { */ export interface LoadBalancerDescription { /** - *A list of Listeners used by the LoadBalancer.
+ *The domain name of the LoadBalancer.
*/ - Listeners?: Listener[]; + Domain?: string; /** *The name of the LoadBalancer.
@@ -929,9 +929,9 @@ export interface LoadBalancerDescription { LoadBalancerName?: string; /** - *The domain name of the LoadBalancer.
+ *A list of Listeners used by the LoadBalancer.
*/ - Domain?: string; + Listeners?: Listener[]; } export namespace LoadBalancerDescription { @@ -971,6 +971,16 @@ export type EnvironmentStatus = *Describes the properties of an environment tier
*/ export interface EnvironmentTier { + /** + *The version of this environment tier. When you don't set a value to it, Elastic Beanstalk uses the + * latest compatible worker tier version.
+ *This member is deprecated. Any specific version that you set may become out of date. + * We recommend leaving it unspecified.
+ *The name of this environment tier.
*Valid values:
@@ -1002,16 +1012,6 @@ export interface EnvironmentTier { * */ Type?: string; - - /** - *The version of this environment tier. When you don't set a value to it, Elastic Beanstalk uses the - * latest compatible worker tier version.
- *This member is deprecated. Any specific version that you set may become out of date. - * We recommend leaving it unspecified.
- *Indicates if there is an in-progress environment configuration update or application - * version deployment that you can cancel.
- *
- * true:
There is an update in progress.
- * false:
There are no updates currently in progress.
For load-balanced, autoscaling environments, the URL to the LoadBalancer. For + * single-instance environments, the IP address of the instance.
*/ - AbortableOperationInProgress?: boolean; + EndpointURL?: string; /** - *The name of the configuration template used to originally launch this - * environment.
+ *The name of the application associated with this environment.
*/ - TemplateName?: string; + ApplicationName?: string; /** - *Returns the health status of the application running in your environment. For more - * information, see Health Colors and - * Statuses.
+ *The ARN of the platform version.
*/ - HealthStatus?: EnvironmentHealthStatus | string; + PlatformArn?: string; /** - *A list of links to other environments in the same group.
+ *The last modified date for this environment.
*/ - EnvironmentLinks?: EnvironmentLink[]; + DateUpdated?: Date; /** - *The description of the AWS resources used by this environment.
+ *The name of this environment.
*/ - Resources?: EnvironmentResourcesDescription; + EnvironmentName?: string; + + /** + *The creation date for this environment.
+ */ + DateCreated?: Date; + + /** + *Describes this environment.
+ */ + Description?: string; + + /** + *The environment's Amazon Resource Name (ARN), which can be used in other API requests that require an ARN.
+ */ + EnvironmentArn?: string; + + /** + *The application version deployed in this environment.
+ */ + VersionLabel?: string; /** *Describes the health status of the environment. AWS Elastic Beanstalk indicates the @@ -1088,9 +1101,26 @@ export interface EnvironmentDescription { Health?: EnvironmentHealth | string; /** - *
The application version deployed in this environment.
+ *Returns the health status of the application running in your environment. For more + * information, see Health Colors and + * Statuses.
*/ - VersionLabel?: string; + HealthStatus?: EnvironmentHealthStatus | string; + + /** + *The ID of this environment.
+ */ + EnvironmentId?: string; + + /** + *Indicates if there is an in-progress environment configuration update or application + * version deployment that you can cancel.
+ *
+ * true:
There is an update in progress.
+ * false:
There are no updates currently in progress.
The name of the SolutionStack
deployed with this environment.
The last modified date for this environment.
+ *Describes the current tier of this environment.
*/ - DateUpdated?: Date; + Tier?: EnvironmentTier; /** - *The name of this environment.
+ *The URL to the CNAME for this environment.
*/ - EnvironmentName?: string; + CNAME?: string; /** - *Describes this environment.
+ *The Amazon Resource Name (ARN) of the environment's operations role. For more information, + * see Operations roles in the AWS Elastic Beanstalk Developer Guide.
*/ - Description?: string; + OperationsRole?: string; /** - *The environment's Amazon Resource Name (ARN), which can be used in other API requests that require an ARN.
+ *The name of the configuration template used to originally launch this + * environment.
*/ - EnvironmentArn?: string; + TemplateName?: string; + + /** + *The description of the AWS resources used by this environment.
+ */ + Resources?: EnvironmentResourcesDescription; + + /** + *A list of links to other environments in the same group.
+ */ + EnvironmentLinks?: EnvironmentLink[]; /** *The current operational status of the environment:
@@ -1146,48 +1188,6 @@ export interface EnvironmentDescription { * */ Status?: EnvironmentStatus | string; - - /** - *The name of the application associated with this environment.
- */ - ApplicationName?: string; - - /** - *The URL to the CNAME for this environment.
- */ - CNAME?: string; - - /** - *The ARN of the platform version.
- */ - PlatformArn?: string; - - /** - *For load-balanced, autoscaling environments, the URL to the LoadBalancer. For - * single-instance environments, the IP address of the instance.
- */ - EndpointURL?: string; - - /** - *The creation date for this environment.
- */ - DateCreated?: Date; - - /** - *Describes the current tier of this environment.
- */ - Tier?: EnvironmentTier; - - /** - *The Amazon Resource Name (ARN) of the environment's operations role. For more information, - * see Operations roles in the AWS Elastic Beanstalk Developer Guide.
- */ - OperationsRole?: string; - - /** - *The ID of this environment.
- */ - EnvironmentId?: string; } export namespace EnvironmentDescription { @@ -1200,16 +1200,16 @@ export namespace EnvironmentDescription { *Result message containing a list of environment descriptions.
*/ export interface EnvironmentDescriptionsMessage { - /** - *Returns an EnvironmentDescription list.
- */ - Environments?: EnvironmentDescription[]; - /** *In a paginated request, the token that you can pass in a subsequent request to get the * next response page.
*/ NextToken?: string; + + /** + *Returns an EnvironmentDescription list.
+ */ + Environments?: EnvironmentDescription[]; } export namespace EnvironmentDescriptionsMessage { @@ -1241,14 +1241,14 @@ export namespace TooManyEnvironmentsException { */ export interface Tag { /** - *The value of the tag.
+ *The key of the tag.
*/ - Value?: string; + Key?: string; /** - *The key of the tag.
+ *The value of the tag.
*/ - Key?: string; + Value?: string; } export namespace Tag { @@ -1261,17 +1261,6 @@ export namespace Tag { *Request to create an application.
*/ export interface CreateApplicationMessage { - /** - *Specifies an application resource lifecycle configuration to prevent your application - * from accumulating too many versions.
- */ - ResourceLifecycleConfig?: ApplicationResourceLifecycleConfig; - - /** - *The name of the application. Must be unique within your account.
- */ - ApplicationName: string | undefined; - /** *Your description of the application.
*/ @@ -1283,6 +1272,17 @@ export interface CreateApplicationMessage { * application don't inherit the tags. */ Tags?: Tag[]; + + /** + *Specifies an application resource lifecycle configuration to prevent your application + * from accumulating too many versions.
+ */ + ResourceLifecycleConfig?: ApplicationResourceLifecycleConfig; + + /** + *The name of the application. Must be unique within your account.
+ */ + ApplicationName: string | undefined; } export namespace CreateApplicationMessage { @@ -1337,6 +1337,16 @@ export enum ComputeType { *Settings for an AWS CodeBuild build.
*/ export interface BuildConfiguration { + /** + *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
+ */ + CodeBuildServiceRole: string | undefined; + + /** + *How long in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes.
+ */ + TimeoutInMinutes?: number; + /** *The name of the artifact of the CodeBuild build. * If provided, Elastic Beanstalk stores the build artifact in the S3 location @@ -1348,9 +1358,9 @@ export interface BuildConfiguration { ArtifactName?: string; /** - *
The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
+ *The ID of the Docker image to use for this build project.
*/ - CodeBuildServiceRole: string | undefined; + Image: string | undefined; /** *Information about the compute resources the build project will use.
@@ -1373,16 +1383,6 @@ export interface BuildConfiguration { * */ ComputeType?: ComputeType | string; - - /** - *The ID of the Docker image to use for this build project.
- */ - Image: string | undefined; - - /** - *How long in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes.
- */ - TimeoutInMinutes?: number; } export namespace BuildConfiguration { @@ -1396,27 +1396,38 @@ export namespace BuildConfiguration { */ export interface CreateApplicationVersionMessage { /** - *A description of this application version.
+ * The name of the application. If no application is found with this name, and
+ * AutoCreateApplication
is false
, returns an
+ * InvalidParameterValue
error.
Settings for an AWS CodeBuild build.
+ *A label identifying this version.
+ *Constraint: Must be unique per application. If an application version already exists
+ * with this label for the specified application, AWS Elastic Beanstalk returns an
+ * InvalidParameterValue
error.
Specify a commit in an AWS CodeCommit Git repository to use as the source code for the - * application version.
+ *The Amazon S3 bucket and key that identify the location of the source bundle for this + * version.
+ *The Amazon S3 bucket must be in the same region as the + * environment.
+ *Specify a source bundle in S3 or a commit in an AWS CodeCommit repository (with
+ * SourceBuildInformation
), but not both. If neither SourceBundle
nor
+ * SourceBuildInformation
are provided, Elastic Beanstalk uses a sample
+ * application.
Specifies the tags applied to the application version.
- *Elastic Beanstalk applies these tags only to the application version. Environments that use the - * application version don't inherit the tags.
+ *A description of this application version.
*/ - Tags?: Tag[]; + Description?: string; /** *Set to true
to create an application with the specified name if it doesn't
@@ -1441,33 +1452,22 @@ export interface CreateApplicationVersionMessage {
Process?: boolean;
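// Illustrative sketch (not part of this diff): a CreateApplicationVersion call using the
// S3 source bundle path described above. Bucket, key, and label values are placeholder
// assumptions; SourceBundle and SourceBuildInformation are mutually exclusive, and
// omitting both makes Elastic Beanstalk use a sample application.
import {
  ElasticBeanstalkClient,
  CreateApplicationVersionCommand,
} from "@aws-sdk/client-elastic-beanstalk";

async function createVersion(eb: ElasticBeanstalkClient) {
  return eb.send(
    new CreateApplicationVersionCommand({
      ApplicationName: "my-app",
      VersionLabel: "build-20240115-1", // must be unique per application
      Description: "nightly build",
      AutoCreateApplication: false,
      Process: true, // validate the source bundle's manifest before it is used
      SourceBundle: {
        S3Bucket: "my-app-artifacts", // must be in the same region as the environment
        S3Key: "builds/my-app.zip",
      },
    })
  );
}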
/**
- *
A label identifying this version.
- *Constraint: Must be unique per application. If an application version already exists
- * with this label for the specified application, AWS Elastic Beanstalk returns an
- * InvalidParameterValue
error.
Specify a commit in an AWS CodeCommit Git repository to use as the source code for the + * application version.
*/ - VersionLabel: string | undefined; + SourceBuildInformation?: SourceBuildInformation; /** - * The name of the application. If no application is found with this name, and
- * AutoCreateApplication
is false
, returns an
- * InvalidParameterValue
error.
Settings for an AWS CodeBuild build.
*/ - ApplicationName: string | undefined; + BuildConfiguration?: BuildConfiguration; /** - *The Amazon S3 bucket and key that identify the location of the source bundle for this - * version.
- *The Amazon S3 bucket must be in the same region as the - * environment.
- *Specify a source bundle in S3 or a commit in an AWS CodeCommit repository (with
- * SourceBuildInformation
), but not both. If neither SourceBundle
nor
- * SourceBuildInformation
are provided, Elastic Beanstalk uses a sample
- * application.
Specifies the tags applied to the application version.
+ *Elastic Beanstalk applies these tags only to the application version. Environments that use the + * application version don't inherit the tags.
*/ - SourceBundle?: S3Location; + Tags?: Tag[]; } export namespace CreateApplicationVersionMessage { @@ -1533,24 +1533,24 @@ export type ConfigurationDeploymentStatus = "deployed" | "failed" | "pending"; */ export interface ConfigurationOptionSetting { /** - *The name of the configuration option.
+ *The current value for the configuration option.
*/ - OptionName?: string; + Value?: string; /** - *A unique resource name for the option setting. Use it for a time–based scaling configuration option.
+ *A unique namespace that identifies the option's associated AWS resource.
*/ - ResourceName?: string; + Namespace?: string; /** - *The current value for the configuration option.
+ *The name of the configuration option.
*/ - Value?: string; + OptionName?: string; /** - *A unique namespace that identifies the option's associated AWS resource.
+ *A unique resource name for the option setting. Use it for a time–based scaling configuration option.
*/ - Namespace?: string; + ResourceName?: string; } export namespace ConfigurationOptionSetting { @@ -1563,27 +1563,6 @@ export namespace ConfigurationOptionSetting { *Describes the settings for a configuration set.
*/ export interface ConfigurationSettingsDescription { - /** - * If not null
, the name of the environment for this configuration set.
- *
The ARN of the platform version.
- */ - PlatformArn?: string; - - /** - *The name of the application associated with this configuration set.
- */ - ApplicationName?: string; - - /** - *The date (in UTC time) when this configuration set was last modified.
- */ - DateUpdated?: Date; - /** *A list of the configuration options and their values in this configuration * set.
@@ -1591,25 +1570,9 @@ export interface ConfigurationSettingsDescription { OptionSettings?: ConfigurationOptionSetting[]; /** - *The name of the solution stack this configuration set uses.
- */ - SolutionStackName?: string; - - /** - *Describes this configuration set.
- */ - Description?: string; - - /** - *The date (in UTC time) when this configuration set was created.
- */ - DateCreated?: Date; - - /** - * If not null
, the name of the configuration template for this
- * configuration set.
The ARN of the platform version.
*/ - TemplateName?: string; + PlatformArn?: string; /** *If this configuration set is associated with an environment, the @@ -1639,6 +1602,43 @@ export interface ConfigurationSettingsDescription { * */ DeploymentStatus?: ConfigurationDeploymentStatus | string; + + /** + *
If not null
, the name of the configuration template for this
+ * configuration set.
The name of the application associated with this configuration set.
+ */ + ApplicationName?: string; + + /** + *The date (in UTC time) when this configuration set was created.
+ */ + DateCreated?: Date; + + /** + *Describes this configuration set.
+ */ + Description?: string; + + /** + *The name of the solution stack this configuration set uses.
+ */ + SolutionStackName?: string; + + /** + *The date (in UTC time) when this configuration set was last modified.
+ */ + DateUpdated?: Date; + + /** + * If not null
, the name of the environment for this configuration set.
+ *
The name of the application associated with the configuration.
+ *The name of the configuration template.
*/ - ApplicationName?: string; + TemplateName?: string; /** - *The name of the configuration template.
+ *The name of the application associated with the configuration.
*/ - TemplateName?: string; + ApplicationName?: string; } export namespace SourceConfiguration { @@ -1672,41 +1672,6 @@ export namespace SourceConfiguration { *Request to create a configuration template.
*/ export interface CreateConfigurationTemplateMessage { - /** - *The Amazon Resource Name (ARN) of the custom platform. For more information, see Custom - * Platforms in the AWS Elastic Beanstalk Developer Guide.
- *If you specify PlatformArn
, then don't specify
- * SolutionStackName
.
Specifies the tags applied to the configuration template.
- */ - Tags?: Tag[]; - - /** - *The name of the Elastic Beanstalk application to associate with this configuration - * template.
- */ - ApplicationName: string | undefined; - - /** - *An optional description for this configuration.
- */ - Description?: string; - - /** - *The ID of an environment whose settings you want to use to create the configuration
- * template. You must specify EnvironmentId
if you don't specify
- * PlatformArn
, SolutionStackName
, or
- * SourceConfiguration
.
The name of an Elastic Beanstalk solution stack (platform version) that this configuration uses. For
* example, 64bit Amazon Linux 2013.09 running Tomcat 7 Java 7
. A solution stack
@@ -1730,14 +1695,6 @@ export interface CreateConfigurationTemplateMessage {
*/
TemplateName: string | undefined;
- /**
- *
Option values for the Elastic Beanstalk configuration, such as the instance type. If specified, these - * values override the values obtained from the solution stack or the source configuration - * template. For a complete list of Elastic Beanstalk configuration options, see Option Values in the - * AWS Elastic Beanstalk Developer Guide.
- */ - OptionSettings?: ConfigurationOptionSetting[]; - /** *An Elastic Beanstalk configuration template to base this one on. If specified, Elastic Beanstalk uses the configuration values from the specified * configuration template to create a new configuration.
@@ -1751,6 +1708,49 @@ export interface CreateConfigurationTemplateMessage { * name. */ SourceConfiguration?: SourceConfiguration; + + /** + *The name of the Elastic Beanstalk application to associate with this configuration + * template.
+ */ + ApplicationName: string | undefined; + + /** + *Option values for the Elastic Beanstalk configuration, such as the instance type. If specified, these + * values override the values obtained from the solution stack or the source configuration + * template. For a complete list of Elastic Beanstalk configuration options, see Option Values in the + * AWS Elastic Beanstalk Developer Guide.
+ */ + OptionSettings?: ConfigurationOptionSetting[]; + + /** + *The Amazon Resource Name (ARN) of the custom platform. For more information, see Custom + * Platforms in the AWS Elastic Beanstalk Developer Guide.
+ *If you specify PlatformArn
, then don't specify
+ * SolutionStackName
.
The ID of an environment whose settings you want to use to create the configuration
+ * template. You must specify EnvironmentId
if you don't specify
+ * PlatformArn
, SolutionStackName
, or
+ * SourceConfiguration
.
Specifies the tags applied to the configuration template.
+ */ + Tags?: Tag[]; + + /** + *An optional description for this configuration.
+ */ + Description?: string; } export namespace CreateConfigurationTemplateMessage { @@ -1799,6 +1799,11 @@ export namespace TooManyConfigurationTemplatesException { *A specification identifying an individual configuration option.
*/ export interface OptionSpecification { + /** + *A unique resource name for a time-based scaling configuration option.
+ */ + ResourceName?: string; + /** *The name of the configuration option.
*/ @@ -1808,11 +1813,6 @@ export interface OptionSpecification { *A unique namespace identifying the option's associated AWS resource.
*/ Namespace?: string; - - /** - *A unique resource name for a time-based scaling configuration option.
- */ - ResourceName?: string; } export namespace OptionSpecification { @@ -1826,9 +1826,10 @@ export namespace OptionSpecification { */ export interface CreateEnvironmentMessage { /** - *Specifies the tags applied to resources in the environment.
+ *A list of custom user-defined configuration options to remove from the configuration + * set for this new environment.
*/ - Tags?: Tag[]; + OptionsToRemove?: OptionSpecification[]; /** *The name of the Elastic Beanstalk configuration template to use with the environment.
@@ -1839,6 +1840,29 @@ export interface CreateEnvironmentMessage { */ TemplateName?: string; + /** + *If specified, the environment attempts to use this value as the prefix for the CNAME in + * your Elastic Beanstalk environment URL. If not specified, the CNAME is generated automatically by + * appending a random alphanumeric string to the environment name.
+ */ + CNAMEPrefix?: string; + + /** + *A unique name for the environment.
+ *Constraint: Must be from 4 to 40 characters in length. The name can contain only
+ * letters, numbers, and hyphens. It can't start or end with a hyphen. This name must be unique
+ * within a region in your account. If the specified name already exists in the region, Elastic Beanstalk returns an
+ * InvalidParameterValue
error.
If you don't specify the CNAMEPrefix
parameter, the environment name becomes part of
+ * the CNAME, and therefore part of the visible URL for your application.
Specifies the tags applied to resources in the environment.
+ */ + Tags?: Tag[]; + /** *If specified, AWS Elastic Beanstalk sets the specified configuration options to the * requested value in the configuration set for the new environment. These override the values @@ -1846,13 +1870,6 @@ export interface CreateEnvironmentMessage { */ OptionSettings?: ConfigurationOptionSetting[]; - /** - *
Specifies the tier to use in creating this environment. The environment tier that you - * choose determines whether Elastic Beanstalk provisions resources to support a web application that handles - * HTTP(S) requests or a web application that handles background-processing tasks.
- */ - Tier?: EnvironmentTier; - /** *The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's * operations role. If specified, Elastic Beanstalk uses the operations role for permissions to downstream @@ -1863,6 +1880,30 @@ export interface CreateEnvironmentMessage { */ OperationsRole?: string; + /** + *
Specifies the tier to use in creating this environment. The environment tier that you + * choose determines whether Elastic Beanstalk provisions resources to support a web application that handles + * HTTP(S) requests or a web application that handles background-processing tasks.
+ */ + Tier?: EnvironmentTier; + + /** + *Your description for this environment.
+ */ + Description?: string; + + /** + *The name of an Elastic Beanstalk solution stack (platform version) to use with the environment. If + * specified, Elastic Beanstalk sets the configuration values to the default values associated with the + * specified solution stack. For a list of current solution stacks, see Elastic Beanstalk Supported Platforms in the AWS Elastic Beanstalk + * Platforms guide.
+ *If you specify SolutionStackName
, don't specify PlatformArn
or
+ * TemplateName
.
The name of the group to which the target environment belongs. Specify a group name * only if the environment's name is specified in an environment manifest and not with the @@ -1877,13 +1918,6 @@ export interface CreateEnvironmentMessage { */ VersionLabel?: string; - /** - *
If specified, the environment attempts to use this value as the prefix for the CNAME in - * your Elastic Beanstalk environment URL. If not specified, the CNAME is generated automatically by - * appending a random alphanumeric string to the environment name.
- */ - CNAMEPrefix?: string; - /** *The name of the application that is associated with this environment.
*/ @@ -1900,40 +1934,6 @@ export interface CreateEnvironmentMessage { * */ PlatformArn?: string; - - /** - *A list of custom user-defined configuration options to remove from the configuration - * set for this new environment.
- */ - OptionsToRemove?: OptionSpecification[]; - - /** - *The name of an Elastic Beanstalk solution stack (platform version) to use with the environment. If - * specified, Elastic Beanstalk sets the configuration values to the default values associated with the - * specified solution stack. For a list of current solution stacks, see Elastic Beanstalk Supported Platforms in the AWS Elastic Beanstalk - * Platforms guide.
- *If you specify SolutionStackName
, don't specify PlatformArn
or
- * TemplateName
.
Your description for this environment.
- */ - Description?: string; - - /** - *A unique name for the environment.
- *Constraint: Must be from 4 to 40 characters in length. The name can contain only
- * letters, numbers, and hyphens. It can't start or end with a hyphen. This name must be unique
- * within a region in your account. If the specified name already exists in the region, Elastic Beanstalk returns an
- * InvalidParameterValue
error.
If you don't specify the CNAMEPrefix
parameter, the environment name becomes part of
- * the CNAME, and therefore part of the visible URL for your application.
Request to create a new platform version.
*/ export interface CreatePlatformVersionRequest { + /** + *The location of the platform definition archive in Amazon S3.
+ */ + PlatformDefinitionBundle: S3Location | undefined; + + /** + *The name of your custom platform.
+ */ + PlatformName: string | undefined; + /** *Specifies the tags applied to the new platform version.
*Elastic Beanstalk applies these tags only to the platform version. Environments that you create using @@ -1953,11 +1963,6 @@ export interface CreatePlatformVersionRequest { */ Tags?: Tag[]; - /** - *
The location of the platform definition archive in Amazon S3.
- */ - PlatformDefinitionBundle: S3Location | undefined; - /** *The name of the builder environment.
*/ @@ -1971,12 +1976,7 @@ export interface CreatePlatformVersionRequest { /** *The configuration option settings to apply to the builder environment.
*/ - OptionSettings?: ConfigurationOptionSetting[]; - - /** - *The name of your custom platform.
- */ - PlatformName: string | undefined; + OptionSettings?: ConfigurationOptionSetting[]; } export namespace CreatePlatformVersionRequest { @@ -2008,28 +2008,20 @@ export type PlatformStatus = "Creating" | "Deleted" | "Deleting" | "Failed" | "R */ export interface PlatformSummary { /** - *The state of the platform version's branch in its lifecycle.
- *Possible values: beta
| supported
| deprecated
|
- * retired
- *
The AWS account ID of the person who created the platform version.
*/ - PlatformBranchLifecycleState?: string; + PlatformOwner?: string; /** - *The category of platform version.
+ *The platform branch to which the platform version belongs.
*/ - PlatformCategory?: string; + PlatformBranchName?: string; /** *The ARN of the platform version.
*/ PlatformArn?: string; - /** - *The operating system used by the platform version.
- */ - OperatingSystemName?: string; - /** *The status of the platform version. You can create an environment from the platform * version once it is ready.
@@ -2037,19 +2029,27 @@ export interface PlatformSummary { PlatformStatus?: PlatformStatus | string; /** - *The AWS account ID of the person who created the platform version.
+ *The version string of the platform version.
*/ - PlatformOwner?: string; + PlatformVersion?: string; /** - *The version string of the platform version.
+ *The additions associated with the platform version.
*/ - PlatformVersion?: string; + SupportedAddonList?: string[]; /** - *The platform branch to which the platform version belongs.
+ *The state of the platform version's branch in its lifecycle.
+ *Possible values: beta
| supported
| deprecated
|
+ * retired
+ *
The version of the operating system used by the platform version.
+ */ + OperatingSystemVersion?: string; /** *The state of the platform version in its lifecycle.
@@ -2065,14 +2065,14 @@ export interface PlatformSummary { SupportedTierList?: string[]; /** - *The additions associated with the platform version.
+ *The operating system used by the platform version.
*/ - SupportedAddonList?: string[]; + OperatingSystemName?: string; /** - *The version of the operating system used by the platform version.
+ *The category of platform version.
*/ - OperatingSystemVersion?: string; + PlatformCategory?: string; } export namespace PlatformSummary { @@ -2083,14 +2083,14 @@ export namespace PlatformSummary { export interface CreatePlatformVersionResult { /** - *The builder used to create the custom platform.
+ *Detailed information about the new version of the custom platform.
*/ - Builder?: Builder; + PlatformSummary?: PlatformSummary; /** - *Detailed information about the new version of the custom platform.
+ *The builder used to create the custom platform.
*/ - PlatformSummary?: PlatformSummary; + Builder?: Builder; } export namespace CreatePlatformVersionResult { @@ -2196,13 +2196,6 @@ export namespace OperationInProgressException { *Request to delete an application version.
*/ export interface DeleteApplicationVersionMessage { - /** - *Set to true
to delete the source bundle from your storage bucket.
- * Otherwise, the application version is deleted only from Elastic Beanstalk and the source
- * bundle remains in Amazon S3.
The name of the application to which the version belongs.
*/ @@ -2212,6 +2205,13 @@ export interface DeleteApplicationVersionMessage { *The label of the version to delete.
*/ VersionLabel: string | undefined; + + /** + *Set to true
to delete the source bundle from your storage bucket.
+ * Otherwise, the application version is deleted only from Elastic Beanstalk and the source
+ * bundle remains in Amazon S3.
The name of the application to delete the configuration template from.
+ *The name of the configuration template to delete.
*/ - ApplicationName: string | undefined; + TemplateName: string | undefined; /** - *The name of the configuration template to delete.
+ *The name of the application to delete the configuration template from.
*/ - TemplateName: string | undefined; + ApplicationName: string | undefined; } export namespace DeleteConfigurationTemplateMessage { @@ -2349,9 +2349,9 @@ export namespace ResourceQuota { */ export interface ResourceQuotas { /** - *The quota for configuration templates in the AWS account.
+ *The quota for application versions in the AWS account.
*/ - ConfigurationTemplateQuota?: ResourceQuota; + ApplicationVersionQuota?: ResourceQuota; /** *The quota for applications in the AWS account.
@@ -2359,19 +2359,19 @@ export interface ResourceQuotas { ApplicationQuota?: ResourceQuota; /** - *The quota for custom platforms in the AWS account.
+ *The quota for environments in the AWS account.
*/ - CustomPlatformQuota?: ResourceQuota; + EnvironmentQuota?: ResourceQuota; /** - *The quota for application versions in the AWS account.
+ *The quota for custom platforms in the AWS account.
*/ - ApplicationVersionQuota?: ResourceQuota; + CustomPlatformQuota?: ResourceQuota; /** - *The quota for environments in the AWS account.
+ *The quota for configuration templates in the AWS account.
*/ - EnvironmentQuota?: ResourceQuota; + ConfigurationTemplateQuota?: ResourceQuota; } export namespace ResourceQuotas { @@ -2414,17 +2414,6 @@ export namespace DescribeApplicationsMessage { *Request to describe application versions.
*/ export interface DescribeApplicationVersionsMessage { - /** - *Specify an application name to show only application versions for that - * application.
- */ - ApplicationName?: string; - - /** - *Specify a version label to show a specific application version.
- */ - VersionLabels?: string[]; - /** *For a paginated request. Specify a maximum number of application versions to include in * each response.
@@ -2433,12 +2422,23 @@ export interface DescribeApplicationVersionsMessage { */ MaxRecords?: number; + /** + *Specify an application name to show only application versions for that + * application.
+ */ + ApplicationName?: string; + /** *For a paginated request. Specify a token from a previous response page to retrieve the next response page. All other * parameter values must be identical to the ones specified in the initial request.
*If no NextToken
is specified, the first page is retrieved.
Specify a version label to show a specific application version.
+ */ + VersionLabels?: string[]; } export namespace DescribeApplicationVersionsMessage { @@ -2452,16 +2452,16 @@ export namespace DescribeApplicationVersionsMessage { * value. */ export interface OptionRestrictionRegex { - /** - *A unique name representing this regular expression.
- */ - Label?: string; - /** *The regular expression pattern that a string configuration option value with this * restriction must match.
*/ Pattern?: string; + + /** + *A unique name representing this regular expression.
+ */ + Label?: string; } export namespace OptionRestrictionRegex { @@ -2477,9 +2477,38 @@ export type ConfigurationOptionValueType = "List" | "Scalar"; */ export interface ConfigurationOptionDescription { /** - *The default value for this configuration option.
+ *A unique namespace identifying the option's associated AWS resource.
*/ - DefaultValue?: string; + Namespace?: string; + + /** + *If specified, the configuration option must be a string value that satisfies this + * regular expression.
+ */ + Regex?: OptionRestrictionRegex; + + /** + *If specified, the configuration option must be a string value no longer than this + * value.
+ */ + MaxLength?: number; + + /** + *If specified, the configuration option must be a numeric value greater than this + * value.
+ */ + MinValue?: number; + + /** + *The name of the configuration option.
+ */ + Name?: string; + + /** + *If specified, values for the configuration option are selected from this + * list.
+ */ + ValueOptions?: string[]; /** *An indication of which action is required if the value for this configuration option @@ -2506,66 +2535,6 @@ export interface ConfigurationOptionDescription { */ ChangeSeverity?: string; - /** - *
If specified, the configuration option must be a string value no longer than this - * value.
- */ - MaxLength?: number; - - /** - *An indication of whether the user defined this configuration option:
- *
- * true
: This configuration option was defined by the user. It is a valid
- * choice for specifying if this as an Option to Remove
when updating
- * configuration settings.
- * false
: This configuration was not defined by the user.
Constraint: You can remove only UserDefined
options from a configuration.
Valid Values: true
| false
- *
The name of the configuration option.
- */ - Name?: string; - - /** - *A unique namespace identifying the option's associated AWS resource.
- */ - Namespace?: string; - - /** - *If specified, the configuration option must be a numeric value less than this - * value.
- */ - MaxValue?: number; - - /** - *If specified, the configuration option must be a numeric value greater than this - * value.
- */ - MinValue?: number; - - /** - *If specified, the configuration option must be a string value that satisfies this - * regular expression.
- */ - Regex?: OptionRestrictionRegex; - - /** - *If specified, values for the configuration option are selected from this - * list.
- */ - ValueOptions?: string[]; - /** *An indication of which type of values this option has and whether it is allowable to * select one or more than one of the possible values:
@@ -2594,6 +2563,37 @@ export interface ConfigurationOptionDescription { * */ ValueType?: ConfigurationOptionValueType | string; + + /** + *The default value for this configuration option.
+ */ + DefaultValue?: string; + + /** + *An indication of whether the user defined this configuration option:
+ *
+ * true
: This configuration option was defined by the user. It is a valid
+ * choice for specifying if this as an Option to Remove
when updating
+ * configuration settings.
+ * false
: This configuration was not defined by the user.
Constraint: You can remove only UserDefined
options from a configuration.
Valid Values: true
| false
+ *
If specified, the configuration option must be a numeric value less than this + * value.
+ */ + MaxValue?: number; } export namespace ConfigurationOptionDescription { @@ -2603,14 +2603,9 @@ export namespace ConfigurationOptionDescription { } /** - *Describes the settings for a specified configuration set.
- */ -export interface ConfigurationOptionsDescription { - /** - *The ARN of the platform version.
- */ - PlatformArn?: string; - + *Describes the settings for a specified configuration set.
+ */ +export interface ConfigurationOptionsDescription { /** *The name of the solution stack these configuration options belong to.
*/ @@ -2620,6 +2615,11 @@ export interface ConfigurationOptionsDescription { *A list of ConfigurationOptionDescription.
*/ Options?: ConfigurationOptionDescription[]; + + /** + *The ARN of the platform version.
+ */ + PlatformArn?: string; } export namespace ConfigurationOptionsDescription { @@ -2633,38 +2633,38 @@ export namespace ConfigurationOptionsDescription { */ export interface DescribeConfigurationOptionsMessage { /** - *If specified, restricts the descriptions to only the specified options.
+ *The name of the configuration template whose configuration options you want to + * describe.
*/ - Options?: OptionSpecification[]; + TemplateName?: string; /** - *The name of the environment whose configuration options you want to describe.
+ *The name of the application associated with the configuration template or environment. + * Only needed if you want to describe the configuration options associated with either the + * configuration template or environment.
*/ - EnvironmentName?: string; + ApplicationName?: string; /** - *The name of the solution stack whose configuration options you want to - * describe.
+ *The ARN of the custom platform.
*/ - SolutionStackName?: string; + PlatformArn?: string; /** - *The name of the application associated with the configuration template or environment. - * Only needed if you want to describe the configuration options associated with either the - * configuration template or environment.
+ *The name of the environment whose configuration options you want to describe.
*/ - ApplicationName?: string; + EnvironmentName?: string; /** - *The name of the configuration template whose configuration options you want to + *
The name of the solution stack whose configuration options you want to * describe.
*/ - TemplateName?: string; + SolutionStackName?: string; /** - *The ARN of the custom platform.
+ *If specified, restricts the descriptions to only the specified options.
*/ - PlatformArn?: string; + Options?: OptionSpecification[]; } export namespace DescribeConfigurationOptionsMessage { @@ -2696,9 +2696,13 @@ export namespace ConfigurationSettingsDescriptions { */ export interface DescribeConfigurationSettingsMessage { /** - *The application for the environment or configuration template.
+ *The name of the environment to describe.
+ * Condition: You must specify either this or a TemplateName, but not both. If you
+ * specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination
error.
+ * If you do not specify either, AWS Elastic Beanstalk returns
+ * MissingRequiredParameter
error.
The name of the configuration template to describe.
@@ -2710,13 +2714,9 @@ export interface DescribeConfigurationSettingsMessage { TemplateName?: string; /** - *The name of the environment to describe.
- * Condition: You must specify either this or a TemplateName, but not both. If you
- * specify both, AWS Elastic Beanstalk returns an InvalidParameterCombination
error.
- * If you do not specify either, AWS Elastic Beanstalk returns
- * MissingRequiredParameter
error.
The application for the environment or configuration template.
*/ - EnvironmentName?: string; + ApplicationName: string | undefined; } export namespace DescribeConfigurationSettingsMessage { @@ -2740,12 +2740,6 @@ export enum EnvironmentHealthAttribute { *See the example below to learn how to create a request body.
*/ export interface DescribeEnvironmentHealthRequest { - /** - *Specify the environment by ID.
- *You must specify either this or an EnvironmentName, or both.
- */ - EnvironmentId?: string; - /** *Specify the response elements to return. To retrieve all attributes, set to
* All
. If no attribute names are specified, returns the name of the
@@ -2753,6 +2747,12 @@ export interface DescribeEnvironmentHealthRequest {
*/
AttributeNames?: (EnvironmentHealthAttribute | string)[];
+ /**
+ *
Specify the environment by ID.
+ *You must specify either this or an EnvironmentName, or both.
+ */ + EnvironmentId?: string; + /** *Specify the environment by name.
*You must specify either this or an EnvironmentName, or both.
@@ -2773,24 +2773,31 @@ export namespace DescribeEnvironmentHealthRequest { export interface InstanceHealthSummary { /** *- * Red. The health agent is reporting a high number of request - * failures or other issues for an instance or environment.
+ * Green. An instance is passing health checks and the health + * agent is not reporting any problems. */ - Degraded?: number; + Ok?: number; /** *- * Grey. AWS Elastic Beanstalk and the health agent are - * reporting no data on an instance.
+ * Red. The health agent is reporting a very high number of + * request failures or other issues for an instance or environment. */ - NoData?: number; + Severe?: number; + + /** + *+ * Grey. An operation is in progress on an instance within the + * command timeout.
+ */ + Pending?: number; /** ** Grey. AWS Elastic Beanstalk and the health agent are - * reporting an insufficient amount of data on an instance.
+ * reporting no data on an instance. */ - Unknown?: number; + NoData?: number; /** *@@ -2800,10 +2807,10 @@ export interface InstanceHealthSummary { /** *
- * Grey. An operation is in progress on an instance within the - * command timeout.
+ * Red. The health agent is reporting a high number of request + * failures or other issues for an instance or environment. */ - Pending?: number; + Degraded?: number; /** *@@ -2814,17 +2821,10 @@ export interface InstanceHealthSummary { /** *
- * Green. An instance is passing health checks and the health - * agent is not reporting any problems.
- */ - Ok?: number; - - /** - *- * Red. The health agent is reporting a very high number of - * request failures or other issues for an instance or environment.
+ * Grey. AWS Elastic Beanstalk and the health agent are + * reporting an insufficient amount of data on an instance. */ - Severe?: number; + Unknown?: number; } export namespace InstanceHealthSummary { @@ -2838,48 +2838,48 @@ export namespace InstanceHealthSummary { */ export interface DescribeEnvironmentHealthResult { /** - *The environment's name.
+ *The health status of the
+ * environment. For example, Ok
.
Application request metrics for the environment.
+ *The environment's operational status. Ready
, Launching
,
+ * Updating
, Terminating
, or Terminated
.
Summary health information for the instances in the environment.
+ *The health color of the + * environment.
*/ - InstancesHealth?: InstanceHealthSummary; + Color?: string; /** - *The date and time that the health information was retrieved.
+ *The environment's name.
*/ - RefreshedAt?: Date; + EnvironmentName?: string; /** - *Descriptions of the data that contributed to the environment's current health - * status.
+ *Application request metrics for the environment.
*/ - Causes?: string[]; + ApplicationMetrics?: ApplicationMetrics; /** - *The health color of the - * environment.
+ *Descriptions of the data that contributed to the environment's current health + * status.
*/ - Color?: string; + Causes?: string[]; /** - *The environment's operational status. Ready
, Launching
,
- * Updating
, Terminating
, or Terminated
.
Summary health information for the instances in the environment.
*/ - Status?: EnvironmentHealth | string; + InstancesHealth?: InstanceHealthSummary; /** - *The health status of the
- * environment. For example, Ok
.
The date and time that the health information was retrieved.
*/ - HealthStatus?: string; + RefreshedAt?: Date; } export namespace DescribeEnvironmentHealthResult { @@ -2912,14 +2912,14 @@ export namespace InvalidRequestException { */ export interface DescribeEnvironmentManagedActionHistoryRequest { /** - *The pagination token returned by a previous request.
+ *The maximum number of items to return for a single request.
*/ - NextToken?: string; + MaxItems?: number; /** - *The environment ID of the target environment.
+ *The pagination token returned by a previous request.
*/ - EnvironmentId?: string; + NextToken?: string; /** *The name of the target environment.
@@ -2927,9 +2927,9 @@ export interface DescribeEnvironmentManagedActionHistoryRequest { EnvironmentName?: string; /** - *The maximum number of items to return for a single request.
+ *The environment ID of the target environment.
*/ - MaxItems?: number; + EnvironmentId?: string; } export namespace DescribeEnvironmentManagedActionHistoryRequest { @@ -2952,14 +2952,14 @@ export type FailureType = */ export interface ManagedActionHistoryItem { /** - *The status of the action.
+ *A description of the managed action.
*/ - Status?: ActionHistoryStatus | string; + ActionDescription?: string; /** - *A unique identifier for the managed action.
+ *If the action failed, the type of failure.
*/ - ActionId?: string; + FailureType?: FailureType | string; /** *The type of the managed action.
@@ -2967,29 +2967,29 @@ export interface ManagedActionHistoryItem { ActionType?: ActionType | string; /** - *The date and time that the action finished executing.
+ *If the action failed, a description of the failure.
*/ - FinishedTime?: Date; + FailureDescription?: string; /** - *If the action failed, a description of the failure.
+ *A unique identifier for the managed action.
*/ - FailureDescription?: string; + ActionId?: string; /** - *If the action failed, the type of failure.
+ *The status of the action.
*/ - FailureType?: FailureType | string; + Status?: ActionHistoryStatus | string; /** - *The date and time that the action started executing.
+ *The date and time that the action finished executing.
*/ - ExecutedTime?: Date; + FinishedTime?: Date; /** - *A description of the managed action.
+ *The date and time that the action started executing.
*/ - ActionDescription?: string; + ExecutedTime?: Date; } export namespace ManagedActionHistoryItem { @@ -3024,6 +3024,11 @@ export namespace DescribeEnvironmentManagedActionHistoryResult { *Request to list an environment's upcoming and in-progress managed actions.
*/ export interface DescribeEnvironmentManagedActionsRequest { + /** + *To show only actions with a particular status, specify a status.
+ */ + Status?: ActionStatus | string; + /** *The environment ID of the target environment.
*/ @@ -3033,11 +3038,6 @@ export interface DescribeEnvironmentManagedActionsRequest { *The name of the target environment.
*/ EnvironmentName?: string; - - /** - *To show only actions with a particular status, specify a status.
- */ - Status?: ActionStatus | string; } export namespace DescribeEnvironmentManagedActionsRequest { @@ -3051,10 +3051,15 @@ export namespace DescribeEnvironmentManagedActionsRequest { */ export interface ManagedAction { /** - *The start time of the maintenance window in which the managed action will - * execute.
+ *A unique identifier for the managed action.
*/ - WindowStartTime?: Date; + ActionId?: string; + + /** + *The status of the managed action. If the action is Scheduled
, you can
+ * apply it immediately with ApplyEnvironmentManagedAction.
A description of the managed action.
@@ -3067,15 +3072,10 @@ export interface ManagedAction { ActionType?: ActionType | string; /** - *The status of the managed action. If the action is Scheduled
, you can
- * apply it immediately with ApplyEnvironmentManagedAction.
A unique identifier for the managed action.
+ *The start time of the maintenance window in which the managed action will + * execute.
*/ - ActionId?: string; + WindowStartTime?: Date; } export namespace ManagedAction { @@ -3196,14 +3196,14 @@ export namespace LoadBalancer { */ export interface Queue { /** - *The name of the queue.
+ *The URL of the queue.
*/ - Name?: string; + URL?: string; /** - *The URL of the queue.
+ *The name of the queue.
*/ - URL?: string; + Name?: string; } export namespace Queue { @@ -3233,44 +3233,44 @@ export namespace Trigger { */ export interface EnvironmentResourceDescription { /** - *The Amazon EC2 instances used by this environment.
+ *The queues used by this environment.
*/ - Instances?: Instance[]; + Queues?: Queue[]; /** - * The AutoScalingGroups
used by this environment.
The Amazon EC2 launch templates in use by this environment.
*/ - AutoScalingGroups?: AutoScalingGroup[]; + LaunchTemplates?: LaunchTemplate[]; /** - *The name of the environment.
+ * The AutoScalingGroups
used by this environment.
The AutoScaling
triggers in use by this environment.
The LoadBalancers in use by this environment.
*/ - Triggers?: Trigger[]; + LoadBalancers?: LoadBalancer[]; /** - *The queues used by this environment.
+ *The Amazon EC2 instances used by this environment.
*/ - Queues?: Queue[]; + Instances?: Instance[]; /** - *The LoadBalancers in use by this environment.
+ *The Auto Scaling launch configurations in use by this environment.
*/ - LoadBalancers?: LoadBalancer[]; + LaunchConfigurations?: LaunchConfiguration[]; /** - *The Amazon EC2 launch templates in use by this environment.
+ *The name of the environment.
*/ - LaunchTemplates?: LaunchTemplate[]; + EnvironmentName?: string; /** - *The Auto Scaling launch configurations in use by this environment.
+ *The AutoScaling
triggers in use by this environment.
Request to describe one or more environments.
*/ export interface DescribeEnvironmentsMessage { - /** - *If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only - * those that have the specified names.
- */ - EnvironmentNames?: string[]; - - /** - * If specified when IncludeDeleted
is set to true
, then
- * environments deleted after this date are displayed.
If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only - * those that are associated with this application version.
- */ - VersionLabel?: string; - /** *For a paginated request. Specify a token from a previous response page to retrieve the next response page. All other * parameter values must be identical to the ones specified in the initial request.
@@ -3332,9 +3314,17 @@ export interface DescribeEnvironmentsMessage { /** *If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only - * those that have the specified IDs.
+ * those that are associated with this application version. */ - EnvironmentIds?: string[]; + VersionLabel?: string; + + /** + *For a paginated request. Specify a maximum number of environments to include in + * each response.
+ *If no MaxRecords
is specified, all available environments are
+ * retrieved in a single response.
Indicates whether to include deleted environments:
@@ -3347,12 +3337,22 @@ export interface DescribeEnvironmentsMessage { IncludeDeleted?: boolean; /** - *For a paginated request. Specify a maximum number of environments to include in - * each response.
- *If no MaxRecords
is specified, all available environments are
- * retrieved in a single response.
If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only + * those that have the specified IDs.
*/ - MaxRecords?: number; + EnvironmentIds?: string[]; + + /** + * If specified when IncludeDeleted
is set to true
, then
+ * environments deleted after this date are displayed.
If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only + * those that have the specified names.
+ */ + EnvironmentNames?: string[]; } export namespace DescribeEnvironmentsMessage { @@ -3368,33 +3368,33 @@ export type EventSeverity = "DEBUG" | "ERROR" | "FATAL" | "INFO" | "TRACE" | "WA */ export interface DescribeEventsMessage { /** - * If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that
- * occur up to, but not including, the EndTime
.
Pagination token. If specified, the events return the next batch of results.
*/ - EndTime?: Date; + NextToken?: string; /** - *If specified, AWS Elastic Beanstalk restricts the described events to include only - * those associated with this request ID.
+ *If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only + * those associated with this application.
*/ - RequestId?: string; + ApplicationName?: string; /** - *If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that - * are associated with this environment configuration.
+ *If specified, limits the events returned from this call to include only those with the + * specified severity or higher.
*/ - TemplateName?: string; + Severity?: EventSeverity | string; /** - *Specifies the maximum number of events that can be returned, beginning with the most - * recent event.
+ *If specified, AWS Elastic Beanstalk restricts the returned descriptions to those + * associated with this application version.
*/ - MaxRecords?: number; + VersionLabel?: string; /** - *Pagination token. If specified, the events return the next batch of results.
+ *If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that + * occur on or after this time.
*/ - NextToken?: string; + StartTime?: Date; /** *If specified, AWS Elastic Beanstalk restricts the returned descriptions to those @@ -3403,40 +3403,40 @@ export interface DescribeEventsMessage { EnvironmentName?: string; /** - *
The ARN of a custom platform version. If specified, AWS Elastic Beanstalk restricts the - * returned descriptions to those associated with this custom platform version.
+ *If specified, AWS Elastic Beanstalk restricts the returned descriptions to those + * associated with this environment.
*/ - PlatformArn?: string; + EnvironmentId?: string; /** - *If specified, limits the events returned from this call to include only those with the - * specified severity or higher.
+ *If specified, AWS Elastic Beanstalk restricts the described events to include only + * those associated with this request ID.
*/ - Severity?: EventSeverity | string; + RequestId?: string; /** - *If specified, AWS Elastic Beanstalk restricts the returned descriptions to those - * associated with this environment.
+ * If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that
+ * occur up to, but not including, the EndTime
.
If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only - * those associated with this application.
+ *Specifies the maximum number of events that can be returned, beginning with the most + * recent event.
*/ - ApplicationName?: string; + MaxRecords?: number; /** *If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that - * occur on or after this time.
+ * are associated with this environment configuration. */ - StartTime?: Date; + TemplateName?: string; /** - *If specified, AWS Elastic Beanstalk restricts the returned descriptions to those - * associated with this application version.
+ *The ARN of a custom platform version. If specified, AWS Elastic Beanstalk restricts the + * returned descriptions to those associated with this custom platform version.
*/ - VersionLabel?: string; + PlatformArn?: string; } export namespace DescribeEventsMessage { @@ -3450,14 +3450,14 @@ export namespace DescribeEventsMessage { */ export interface EventDescription { /** - *The name of the configuration associated with this event.
+ *The application associated with the event.
*/ - TemplateName?: string; + ApplicationName?: string; /** - *The web service request ID for the activity of this event.
+ *The name of the environment associated with this event.
*/ - RequestId?: string; + EnvironmentName?: string; /** *The date when the event occurred.
@@ -3465,34 +3465,34 @@ export interface EventDescription { EventDate?: Date; /** - *The application associated with the event.
+ *The ARN of the platform version.
*/ - ApplicationName?: string; + PlatformArn?: string; /** - *The severity level of this event.
+ *The event message.
*/ - Severity?: EventSeverity | string; + Message?: string; /** - *The release label for the application version associated with this event.
+ *The name of the configuration associated with this event.
*/ - VersionLabel?: string; + TemplateName?: string; /** - *The ARN of the platform version.
+ *The severity level of this event.
*/ - PlatformArn?: string; + Severity?: EventSeverity | string; /** - *The name of the environment associated with this event.
+ *The web service request ID for the activity of this event.
*/ - EnvironmentName?: string; + RequestId?: string; /** - *The event message.
+ *The release label for the application version associated with this event.
*/ - Message?: string; + VersionLabel?: string; } export namespace EventDescription { @@ -3554,14 +3554,14 @@ export interface DescribeInstancesHealthRequest { NextToken?: string; /** - *Specify the AWS Elastic Beanstalk environment by ID.
+ *Specify the AWS Elastic Beanstalk environment by name.
*/ - EnvironmentId?: string; + EnvironmentName?: string; /** - *Specify the AWS Elastic Beanstalk environment by name.
+ *Specify the AWS Elastic Beanstalk environment by ID.
*/ - EnvironmentName?: string; + EnvironmentId?: string; } export namespace DescribeInstancesHealthRequest { @@ -3579,12 +3579,6 @@ export interface Deployment { */ VersionLabel?: string; - /** - *The ID of the deployment. This number increases by one each time that you deploy source - * code or change instance configuration settings.
- */ - DeploymentId?: number; - /** *The status of the deployment:
*For completed deployments, the time that the deployment ended.
*/ DeploymentTime?: Date; + + /** + *The ID of the deployment. This number increases by one each time that you deploy source + * code or change instance configuration settings.
+ */ + DeploymentId?: number; } export namespace Deployment { @@ -3622,16 +3622,18 @@ export namespace Deployment { */ export interface CPUUtilization { /** - *Percentage of time that the CPU has spent in the User
state over the last
+ *
Available on Linux environments only.
+ *Percentage of time that the CPU has spent in the Nice
state over the last
* 10 seconds.
Percentage of time that the CPU has spent in the Idle
state over the last
- * 10 seconds.
Available on Linux environments only.
+ *Percentage of time that the CPU has spent in the SoftIRQ
state over the
+ * last 10 seconds.
Available on Linux environments only.
@@ -3648,18 +3650,16 @@ export interface CPUUtilization { Privileged?: number; /** - *Available on Linux environments only.
- *Percentage of time that the CPU has spent in the SoftIRQ
state over the
- * last 10 seconds.
Percentage of time that the CPU has spent in the Idle
state over the last
+ * 10 seconds.
Available on Linux environments only.
- *Percentage of time that the CPU has spent in the I/O Wait
state over the
- * last 10 seconds.
Percentage of time that the CPU has spent in the User
state over the last
+ * 10 seconds.
Available on Linux environments only.
@@ -3670,10 +3670,10 @@ export interface CPUUtilization { /** *Available on Linux environments only.
- *Percentage of time that the CPU has spent in the Nice
state over the last
- * 10 seconds.
Percentage of time that the CPU has spent in the I/O Wait
state over the
+ * last 10 seconds.
The ID of the Amazon EC2 instance.
+ *Request metrics from your application.
*/ - InstanceId?: string; + ApplicationMetrics?: ApplicationMetrics; /** - *The instance's type.
+ *Operating system metrics from the instance.
*/ - InstanceType?: string; + System?: SystemStatus; /** - *Information about the most recent deployment to an instance.
+ *Represents the causes, which provide more information about the current health + * status.
*/ - Deployment?: Deployment; + Causes?: string[]; /** - *Operating system metrics from the instance.
+ *The ID of the Amazon EC2 instance.
*/ - System?: SystemStatus; + InstanceId?: string; /** *The time at which the EC2 instance was launched.
@@ -3736,11 +3737,14 @@ export interface SingleInstanceHealth { LaunchedAt?: Date; /** - *Represents the color indicator that gives you information about the health of the EC2 - * instance. For more information, see Health Colors and - * Statuses.
+ *Information about the most recent deployment to an instance.
*/ - Color?: string; + Deployment?: Deployment; + + /** + *The instance's type.
+ */ + InstanceType?: string; /** *Returns the health status of the specified instance. For more information, see Health
@@ -3754,15 +3758,11 @@ export interface SingleInstanceHealth {
AvailabilityZone?: string;
/**
- * Request metrics from your application. Represents the causes, which provide more information about the current health
- * status. Represents the color indicator that gives you information about the health of the EC2
+ * instance. For more information, see Health Colors and
+ * Statuses. Detailed health information about each instance. The output differs slightly between Linux and Windows environments. There is a difference
- * in the members that are supported under the Pagination token for the next page of results, if available. The date and time that the health information was retrieved. Pagination token for the next page of results, if available. Detailed health information about each instance. The output differs slightly between Linux and Windows environments. There is a difference
+ * in the members that are supported under the The type of virtualization used to create the custom AMI. THe ID of the image used to create the custom AMI. THe ID of the image used to create the custom AMI. The type of virtualization used to create the custom AMI. The version of the programming language. The name of the programming language. The name of the programming language. The version of the programming language. The platform branch to which the platform version belongs. The programming languages supported by the platform version. The status of the platform version. The date when the platform version was created. The name of the platform version. The date when the platform version was last updated. Information about the maintainer of the platform version. The version of the platform version. The AWS account ID of the person who created the platform version. The additions supported by the platform version. The programming languages supported by the platform version. The version of the operating system used by the platform version. The additions supported by the platform version. The custom AMIs supported by the platform version. The name of the solution stack used by the platform version. The platform branch to which the platform version belongs. The frameworks supported by the platform version. The AWS account ID of the person who created the platform version. The date when the platform version was last updated. The ARN of the platform version. The custom AMIs supported by the platform version. The operating system used by the platform version. The description of the platform version. The state of the platform version's branch in its lifecycle. Possible values: The category of the platform version. The tiers supported by the platform version. The operating system used by the platform version. The frameworks supported by the platform version. The ARN of the platform version. The name of the platform version. The state of the platform version's branch in its lifecycle. Possible values: The status of the platform version. The date when the platform version was created. The description of the platform version. The version of the operating system used by the platform version. The name of the solution stack used by the platform version. The version of the platform version. The category of the platform version. The tiers supported by the platform version. Information about the maintainer of the platform version. The state of the platform version in its lifecycle.
type.
type.Beta
| Supported
| Deprecated
|
+ * Retired
+ * Beta
| Supported
| Deprecated
|
- * Retired
- * SearchFilter
parameter.
The operator to apply to the Attribute
with each of the Values
.
- * Valid values vary by Attribute
.
The result attribute to which the filter values are applied. Valid values vary by API * action.
@@ -4082,6 +4076,12 @@ export interface SearchFilter { * attributes. Number of values and valid values vary byAttribute
.
*/
Values?: string[];
+
+ /**
+ * The operator to apply to the Attribute
with each of the Values
.
+ * Valid values vary by Attribute
.
For a paginated request. Specify a token from a previous response page to retrieve the - * next response page. All other parameter values must be identical to the ones specified in the - * initial request.
- *If no NextToken
is specified, the first page is retrieved.
The maximum number of platform branch values returned in one call.
*/ - NextToken?: string; + MaxRecords?: number; /** *Criteria for restricting the resulting list of platform branches. The filter is evaluated @@ -4174,9 +4171,12 @@ export interface ListPlatformBranchesRequest { Filters?: SearchFilter[]; /** - *
The maximum number of platform branch values returned in one call.
+ *For a paginated request. Specify a token from a previous response page to retrieve the + * next response page. All other parameter values must be identical to the ones specified in the + * initial request.
+ *If no NextToken
is specified, the first page is retrieved.
The support life cycle state of the platform branch.
- *Possible values: beta
| supported
| deprecated
|
- * retired
- *
The name of the platform branch.
- */ - BranchName?: string; - - /** - *The environment tiers that platform versions in this branch support.
- *Possible values: WebServer/Standard
| Worker/SQS/HTTP
- *
The name of the platform to which this platform branch belongs.
*/ - SupportedTierList?: string[]; + PlatformName?: string; /** *An ordinal number that designates the order in which platform branches have been added to @@ -4220,9 +4205,24 @@ export interface PlatformBranchSummary { BranchOrder?: number; /** - *
The name of the platform to which this platform branch belongs.
+ *The name of the platform branch.
*/ - PlatformName?: string; + BranchName?: string; + + /** + *The support life cycle state of the platform branch.
+ *Possible values: beta
| supported
| deprecated
|
+ * retired
+ *
The environment tiers that platform versions in this branch support.
+ *Possible values: WebServer/Standard
| Worker/SQS/HTTP
+ *
The operator to apply to the Type
with each of the
- * Values
.
Valid values: =
| !=
|
- * <
| <=
|
- * >
| >=
|
- * contains
| begins_with
| ends_with
- *
The list of values applied to the filtering platform version attribute. Only one value is supported * for all current operators.
@@ -4309,6 +4298,17 @@ export interface PlatformFilter { *The operator to apply to the Type
with each of the
+ * Values
.
Valid values: =
| !=
|
+ * <
| <=
|
+ * >
| >=
|
+ * contains
| begins_with
| ends_with
+ *
The maximum number of platform version values returned in one call.
- */ - MaxRecords?: number; - /** *For a paginated request. Specify a token from a previous response page to retrieve the * next response page. All other parameter values must be identical to the ones specified in the @@ -4337,6 +4332,11 @@ export interface ListPlatformVersionsRequest { * terms.
*/ Filters?: PlatformFilter[]; + + /** + *The maximum number of platform version values returned in one call.
+ */ + MaxRecords?: number; } export namespace ListPlatformVersionsRequest { @@ -4347,15 +4347,15 @@ export namespace ListPlatformVersionsRequest { export interface ListPlatformVersionsResult { /** - *In a paginated request, if this value isn't null
, it's the token that you can
- * pass in a subsequent request to get the next response page.
Summary information about the platform versions.
*/ - NextToken?: string; + PlatformSummaryList?: PlatformSummary[]; /** - *Summary information about the platform versions.
+ *In a paginated request, if this value isn't null
, it's the token that you can
+ * pass in a subsequent request to get the next response page.
The Amazon Resource Name (ARN) of the resource for which a tag list was requested.
+ *A list of tag key-value pairs.
*/ - ResourceArn?: string; + ResourceTags?: Tag[]; /** - *A list of tag key-value pairs.
+ *The Amazon Resource Name (ARN) of the resource for which a tag list was requested.
*/ - ResourceTags?: Tag[]; + ResourceArn?: string; } export namespace ResourceTagsDescriptionMessage { @@ -4437,20 +4437,20 @@ export namespace ResourceTypeNotSupportedException { */ export interface RebuildEnvironmentMessage { /** - *The name of the environment to rebuild.
- *Condition: You must specify either this or an EnvironmentId, or both. If you do not + *
The ID of the environment to rebuild.
+ * Condition: You must specify either this or an EnvironmentName, or both. If you do not
* specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
*
The ID of the environment to rebuild.
- *Condition: You must specify either this or an EnvironmentName, or both. If you do not + *
The name of the environment to rebuild.
+ * Condition: You must specify either this or an EnvironmentId, or both. If you do not
* specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
*
The name of the environment of the requested data.
+ *The ID of the environment of the requested data.
*If no such environment is found, RequestEnvironmentInfo
returns an
* InvalidParameterValue
error.
Condition: You must specify either this or an EnvironmentId, or both. If you do not + *
Condition: You must specify either this or an EnvironmentName, or both. If you do not
* specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
*
The ID of the environment of the requested data.
+ *The name of the environment of the requested data.
*If no such environment is found, RequestEnvironmentInfo
returns an
* InvalidParameterValue
error.
Condition: You must specify either this or an EnvironmentName, or both. If you do not + *
Condition: You must specify either this or an EnvironmentId, or both. If you do not
* specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
*
The type of information to retrieve.
+ *The ID of the data's environment.
+ *If no such environment is found, returns an InvalidParameterValue
+ * error.
Condition: You must specify either this or an EnvironmentName, or both. If you do not
+ * specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
+ * error.
The name of the data's environment.
* If no such environment is found, returns an InvalidParameterValue
error.
Condition: You must specify either this or an EnvironmentId, or both. If you do not
* specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
- *
The ID of the data's environment.
- *If no such environment is found, returns an InvalidParameterValue
- * error.
Condition: You must specify either this or an EnvironmentName, or both. If you do not
- * specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
- * error.
The type of information to retrieve.
+ */ + InfoType: EnvironmentInfoType | string | undefined; } export namespace RetrieveEnvironmentInfoMessage { @@ -4614,15 +4614,6 @@ export namespace RetrieveEnvironmentInfoResultMessage { *Swaps the CNAMEs of two environments.
*/ export interface SwapEnvironmentCNAMEsMessage { - /** - *The name of the source environment.
- * Condition: You must specify at least the SourceEnvironmentID
or the
- * SourceEnvironmentName
. You may also specify both. If you specify the
- * SourceEnvironmentName
, you must specify the
- * DestinationEnvironmentName
.
The name of the destination environment.
* Condition: You must specify at least the DestinationEnvironmentID
or the
@@ -4632,6 +4623,15 @@ export interface SwapEnvironmentCNAMEsMessage {
*/
DestinationEnvironmentName?: string;
+ /**
+ *
The name of the source environment.
+ * Condition: You must specify at least the SourceEnvironmentID
or the
+ * SourceEnvironmentName
. You may also specify both. If you specify the
+ * SourceEnvironmentName
, you must specify the
+ * DestinationEnvironmentName
.
The ID of the destination environment.
* Condition: You must specify at least the DestinationEnvironmentID
or the
@@ -4660,28 +4660,6 @@ export namespace SwapEnvironmentCNAMEsMessage {
*
Request to terminate an environment.
*/ export interface TerminateEnvironmentMessage { - /** - *Terminates the target environment even if another environment in the same group is - * dependent on it.
- */ - ForceTerminate?: boolean; - - /** - *The ID of the environment to terminate.
- * Condition: You must specify either this or an EnvironmentName, or both. If you do not
- * specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
- *
The name of the environment to terminate.
- * Condition: You must specify either this or an EnvironmentId, or both. If you do not
- * specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
- *
Indicates whether the associated AWS resources should shut down when the environment is * terminated:
@@ -4705,6 +4683,28 @@ export interface TerminateEnvironmentMessage { * */ TerminateResources?: boolean; + + /** + *Terminates the target environment even if another environment in the same group is + * dependent on it.
+ */ + ForceTerminate?: boolean; + + /** + *The name of the environment to terminate.
+ * Condition: You must specify either this or an EnvironmentId, or both. If you do not
+ * specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
+ *
The ID of the environment to terminate.
+ * Condition: You must specify either this or an EnvironmentName, or both. If you do not
+ * specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
+ *
Request to update an application.
*/ export interface UpdateApplicationMessage { - /** - *The name of the application to update. If no such application is found,
- * UpdateApplication
returns an InvalidParameterValue
error.
- *
A new description for the application.
*Default: If not specified, AWS Elastic Beanstalk does not update the * description.
*/ Description?: string; + + /** + *The name of the application to update. If no such application is found,
+ * UpdateApplication
returns an InvalidParameterValue
error.
+ *
The lifecycle configuration.
+ *The name of the application.
*/ - ResourceLifecycleConfig: ApplicationResourceLifecycleConfig | undefined; + ApplicationName: string | undefined; /** - *The name of the application.
+ *The lifecycle configuration.
*/ - ApplicationName: string | undefined; + ResourceLifecycleConfig: ApplicationResourceLifecycleConfig | undefined; } export namespace UpdateApplicationResourceLifecycleMessage { @@ -4760,11 +4760,6 @@ export namespace UpdateApplicationResourceLifecycleMessage { * */ export interface UpdateApplicationVersionMessage { - /** - *A new description for this version.
- */ - Description?: string; - /** *The name of the version to update.
*If no application version is found with this label, UpdateApplication
@@ -4778,6 +4773,11 @@ export interface UpdateApplicationVersionMessage {
* InvalidParameterValue
error.
A new description for this version.
+ */ + Description?: string; } export namespace UpdateApplicationVersionMessage { @@ -4790,14 +4790,6 @@ export namespace UpdateApplicationVersionMessage { *The result message containing the options for the specified solution stack.
*/ export interface UpdateConfigurationTemplateMessage { - /** - *The name of the application associated with the configuration template to - * update.
- * If no application is found with this name, UpdateConfigurationTemplate
- * returns an InvalidParameterValue
error.
The name of the configuration template to update.
*If no configuration template is found with this name, @@ -4812,17 +4804,25 @@ export interface UpdateConfigurationTemplateMessage { */ OptionSettings?: ConfigurationOptionSetting[]; + /** + *
A new description for the configuration.
+ */ + Description?: string; + + /** + *The name of the application associated with the configuration template to + * update.
+ * If no application is found with this name, UpdateConfigurationTemplate
+ * returns an InvalidParameterValue
error.
A list of configuration options to remove from the configuration set.
* Constraint: You can remove only UserDefined
configuration options.
*
A new description for the configuration.
- */ - Description?: string; } export namespace UpdateConfigurationTemplateMessage { @@ -4836,17 +4836,19 @@ export namespace UpdateConfigurationTemplateMessage { */ export interface UpdateEnvironmentMessage { /** - *A list of custom user-defined configuration options to remove from the configuration - * set for this environment.
+ *The name of the group to which the target environment belongs. Specify a group name + * only if the environment's name is specified in an environment manifest and not with the + * environment name or environment ID parameters. See Environment Manifest + * (env.yaml) for details.
*/ - OptionsToRemove?: OptionSpecification[]; + GroupName?: string; /** - *If this parameter is specified, AWS Elastic Beanstalk deploys this configuration
- * template to the environment. If no such configuration template is found, AWS Elastic Beanstalk
- * returns an InvalidParameterValue
error.
If this parameter is specified, AWS Elastic Beanstalk deploys the named application
+ * version to the environment. If no such application version is found, returns an
+ * InvalidParameterValue
error.
This specifies the platform version that the environment will run after the environment @@ -4855,12 +4857,19 @@ export interface UpdateEnvironmentMessage { SolutionStackName?: string; /** - *
The name of the group to which the target environment belongs. Specify a group name - * only if the environment's name is specified in an environment manifest and not with the - * environment name or environment ID parameters. See Environment Manifest - * (env.yaml) for details.
+ *If this parameter is specified, AWS Elastic Beanstalk updates the description of this + * environment.
*/ - GroupName?: string; + Description?: string; + + /** + *The name of the environment to update. If no environment with this name exists, AWS
+ * Elastic Beanstalk returns an InvalidParameterValue
error.
Condition: You must specify either this or an EnvironmentId, or both. If you do not
+ * specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
+ *
The ID of the environment to update.
@@ -4873,37 +4882,23 @@ export interface UpdateEnvironmentMessage { EnvironmentId?: string; /** - *The ARN of the platform, if used.
+ *This specifies the tier to use to update the environment.
+ *Condition: At this time, if you change the tier version, name, or type, AWS Elastic
+ * Beanstalk returns InvalidParameterValue
error.
If this parameter is specified, AWS Elastic Beanstalk updates the description of this - * environment.
+ *A list of custom user-defined configuration options to remove from the configuration + * set for this environment.
*/ - Description?: string; + OptionsToRemove?: OptionSpecification[]; /** *The name of the application with which the environment is associated.
*/ ApplicationName?: string; - /** - *This specifies the tier to use to update the environment.
- *Condition: At this time, if you change the tier version, name, or type, AWS Elastic
- * Beanstalk returns InvalidParameterValue
error.
The name of the environment to update. If no environment with this name exists, AWS
- * Elastic Beanstalk returns an InvalidParameterValue
error.
Condition: You must specify either this or an EnvironmentId, or both. If you do not
- * specify either, AWS Elastic Beanstalk returns MissingRequiredParameter
error.
- *
If specified, AWS Elastic Beanstalk updates the configuration set associated with the * running environment and sets the specified configuration options to the requested @@ -4912,11 +4907,16 @@ export interface UpdateEnvironmentMessage { OptionSettings?: ConfigurationOptionSetting[]; /** - *
If this parameter is specified, AWS Elastic Beanstalk deploys the named application
- * version to the environment. If no such application version is found, returns an
- * InvalidParameterValue
error.
If this parameter is specified, AWS Elastic Beanstalk deploys this configuration
+ * template to the environment. If no such configuration template is found, AWS Elastic Beanstalk
+ * returns an InvalidParameterValue
error.
The ARN of the platform, if used.
+ */ + PlatformArn?: string; } export namespace UpdateEnvironmentMessage { @@ -4947,6 +4947,12 @@ export namespace TooManyTagsException { } export interface UpdateTagsForResourceMessage { + /** + *The Amazon Resource Name (ARN) of the resouce to be updated.
+ *Must be the ARN of an Elastic Beanstalk resource.
+ */ + ResourceArn: string | undefined; + /** *A list of tag keys to remove. If a tag key doesn't exist, it is silently ignored.
*Specify at least one of these parameters: TagsToAdd
,
@@ -4961,12 +4967,6 @@ export interface UpdateTagsForResourceMessage {
* TagsToRemove
.
The Amazon Resource Name (ARN) of the resouce to be updated.
- *Must be the ARN of an Elastic Beanstalk resource.
- */ - ResourceArn: string | undefined; } export namespace UpdateTagsForResourceMessage { @@ -4986,6 +4986,16 @@ export interface ValidationMessage { */ Message?: string; + /** + *The namespace to which the option belongs.
+ */ + Namespace?: string; + + /** + *The name of the option.
+ */ + OptionName?: string; + /** *An indication of the severity of this message:
*The name of the option.
- */ - OptionName?: string; - - /** - *The namespace to which the option belongs.
- */ - Namespace?: string; } export namespace ValidationMessage { @@ -5041,27 +5041,27 @@ export namespace ConfigurationSettingsValidationMessages { */ export interface ValidateConfigurationSettingsMessage { /** - *The name of the environment to validate the settings against.
- *Condition: You cannot specify both this and a configuration template name.
+ *The name of the configuration template to validate the settings against.
+ *Condition: You cannot specify both this and an environment name.
*/ - EnvironmentName?: string; + TemplateName?: string; /** - *The name of the application that the configuration template or environment belongs - * to.
+ *A list of the options and desired values to evaluate.
*/ - ApplicationName: string | undefined; + OptionSettings: ConfigurationOptionSetting[] | undefined; /** - *The name of the configuration template to validate the settings against.
- *Condition: You cannot specify both this and an environment name.
+ *The name of the application that the configuration template or environment belongs + * to.
*/ - TemplateName?: string; + ApplicationName: string | undefined; /** - *A list of the options and desired values to evaluate.
+ *The name of the environment to validate the settings against.
+ *Condition: You cannot specify both this and a configuration template name.
*/ - OptionSettings: ConfigurationOptionSetting[] | undefined; + EnvironmentName?: string; } export namespace ValidateConfigurationSettingsMessage { diff --git a/clients/client-elastic-beanstalk/protocols/Aws_query.ts b/clients/client-elastic-beanstalk/protocols/Aws_query.ts index a19815ea8096..e8cc13675f34 100644 --- a/clients/client-elastic-beanstalk/protocols/Aws_query.ts +++ b/clients/client-elastic-beanstalk/protocols/Aws_query.ts @@ -4027,12 +4027,12 @@ const serializeAws_queryAbortEnvironmentUpdateMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.EnvironmentName !== undefined) { - entries["EnvironmentName"] = input.EnvironmentName; - } if (input.EnvironmentId !== undefined) { entries["EnvironmentId"] = input.EnvironmentId; } + if (input.EnvironmentName !== undefined) { + entries["EnvironmentName"] = input.EnvironmentName; + } return entries; }; @@ -4051,9 +4051,6 @@ const serializeAws_queryApplicationResourceLifecycleConfig = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.ServiceRole !== undefined) { - entries["ServiceRole"] = input.ServiceRole; - } if (input.VersionLifecycleConfig !== undefined) { const memberEntries = serializeAws_queryApplicationVersionLifecycleConfig(input.VersionLifecycleConfig, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4061,6 +4058,9 @@ const serializeAws_queryApplicationResourceLifecycleConfig = ( entries[loc] = value; }); } + if (input.ServiceRole !== undefined) { + entries["ServiceRole"] = input.ServiceRole; + } return entries; }; @@ -4069,17 +4069,17 @@ const serializeAws_queryApplicationVersionLifecycleConfig = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.MaxCountRule !== undefined) { - const memberEntries = serializeAws_queryMaxCountRule(input.MaxCountRule, context); + if (input.MaxAgeRule !== undefined) { + const memberEntries = serializeAws_queryMaxAgeRule(input.MaxAgeRule, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `MaxCountRule.${key}`; + const loc = `MaxAgeRule.${key}`; entries[loc] = value; }); } - if (input.MaxAgeRule !== undefined) { - const memberEntries = serializeAws_queryMaxAgeRule(input.MaxAgeRule, context); + if (input.MaxCountRule !== undefined) { + const memberEntries = serializeAws_queryMaxCountRule(input.MaxCountRule, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `MaxAgeRule.${key}`; + const loc = `MaxCountRule.${key}`; entries[loc] = value; }); } @@ -4108,31 +4108,31 @@ const serializeAws_queryAssociateEnvironmentOperationsRoleMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.OperationsRole !== undefined) { - entries["OperationsRole"] = input.OperationsRole; - } if (input.EnvironmentName !== undefined) { entries["EnvironmentName"] = input.EnvironmentName; } + if (input.OperationsRole !== undefined) { + entries["OperationsRole"] = input.OperationsRole; + } return entries; }; const serializeAws_queryBuildConfiguration = (input: BuildConfiguration, context: __SerdeContext): any => { const entries: any = {}; - if (input.ArtifactName !== undefined) { - entries["ArtifactName"] = input.ArtifactName; - } if (input.CodeBuildServiceRole !== undefined) { entries["CodeBuildServiceRole"] = input.CodeBuildServiceRole; } - if (input.ComputeType !== undefined) { - entries["ComputeType"] = input.ComputeType; + if (input.TimeoutInMinutes !== undefined) { + entries["TimeoutInMinutes"] = input.TimeoutInMinutes; + } + if 
(input.ArtifactName !== undefined) { + entries["ArtifactName"] = input.ArtifactName; } if (input.Image !== undefined) { entries["Image"] = input.Image; } - if (input.TimeoutInMinutes !== undefined) { - entries["TimeoutInMinutes"] = input.TimeoutInMinutes; + if (input.ComputeType !== undefined) { + entries["ComputeType"] = input.ComputeType; } return entries; }; @@ -4153,6 +4153,12 @@ const serializeAws_queryComposeEnvironmentsMessage = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } + if (input.GroupName !== undefined) { + entries["GroupName"] = input.GroupName; + } if (input.VersionLabels !== undefined) { const memberEntries = serializeAws_queryVersionLabels(input.VersionLabels, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4160,12 +4166,6 @@ const serializeAws_queryComposeEnvironmentsMessage = ( entries[loc] = value; }); } - if (input.GroupName !== undefined) { - entries["GroupName"] = input.GroupName; - } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } return entries; }; @@ -4174,18 +4174,18 @@ const serializeAws_queryConfigurationOptionSetting = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.OptionName !== undefined) { - entries["OptionName"] = input.OptionName; - } - if (input.ResourceName !== undefined) { - entries["ResourceName"] = input.ResourceName; - } if (input.Value !== undefined) { entries["Value"] = input.Value; } if (input.Namespace !== undefined) { entries["Namespace"] = input.Namespace; } + if (input.OptionName !== undefined) { + entries["OptionName"] = input.OptionName; + } + if (input.ResourceName !== undefined) { + entries["ResourceName"] = input.ResourceName; + } return entries; }; @@ -4207,16 +4207,6 @@ const serializeAws_queryConfigurationOptionSettingsList = ( const serializeAws_queryCreateApplicationMessage = (input: CreateApplicationMessage, context: __SerdeContext): any => { const entries: any = {}; - if (input.ResourceLifecycleConfig !== undefined) { - const memberEntries = serializeAws_queryApplicationResourceLifecycleConfig(input.ResourceLifecycleConfig, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `ResourceLifecycleConfig.${key}`; - entries[loc] = value; - }); - } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } if (input.Description !== undefined) { entries["Description"] = input.Description; } @@ -4227,6 +4217,16 @@ const serializeAws_queryCreateApplicationMessage = (input: CreateApplicationMess entries[loc] = value; }); } + if (input.ResourceLifecycleConfig !== undefined) { + const memberEntries = serializeAws_queryApplicationResourceLifecycleConfig(input.ResourceLifecycleConfig, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `ResourceLifecycleConfig.${key}`; + entries[loc] = value; + }); + } + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } return entries; }; @@ -4235,16 +4235,28 @@ const serializeAws_queryCreateApplicationVersionMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.Description !== undefined) { - entries["Description"] = input.Description; + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; } - if (input.BuildConfiguration !== undefined) { - const 
memberEntries = serializeAws_queryBuildConfiguration(input.BuildConfiguration, context); + if (input.VersionLabel !== undefined) { + entries["VersionLabel"] = input.VersionLabel; + } + if (input.SourceBundle !== undefined) { + const memberEntries = serializeAws_queryS3Location(input.SourceBundle, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `BuildConfiguration.${key}`; + const loc = `SourceBundle.${key}`; entries[loc] = value; }); } + if (input.Description !== undefined) { + entries["Description"] = input.Description; + } + if (input.AutoCreateApplication !== undefined) { + entries["AutoCreateApplication"] = input.AutoCreateApplication; + } + if (input.Process !== undefined) { + entries["Process"] = input.Process; + } if (input.SourceBuildInformation !== undefined) { const memberEntries = serializeAws_querySourceBuildInformation(input.SourceBuildInformation, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4252,29 +4264,17 @@ const serializeAws_queryCreateApplicationVersionMessage = ( entries[loc] = value; }); } - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); + if (input.BuildConfiguration !== undefined) { + const memberEntries = serializeAws_queryBuildConfiguration(input.BuildConfiguration, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; + const loc = `BuildConfiguration.${key}`; entries[loc] = value; }); } - if (input.AutoCreateApplication !== undefined) { - entries["AutoCreateApplication"] = input.AutoCreateApplication; - } - if (input.Process !== undefined) { - entries["Process"] = input.Process; - } - if (input.VersionLabel !== undefined) { - entries["VersionLabel"] = input.VersionLabel; - } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } - if (input.SourceBundle !== undefined) { - const memberEntries = serializeAws_queryS3Location(input.SourceBundle, context); + if (input.Tags !== undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `SourceBundle.${key}`; + const loc = `Tags.${key}`; entries[loc] = value; }); } @@ -4286,31 +4286,22 @@ const serializeAws_queryCreateConfigurationTemplateMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.PlatformArn !== undefined) { - entries["PlatformArn"] = input.PlatformArn; + if (input.SolutionStackName !== undefined) { + entries["SolutionStackName"] = input.SolutionStackName; } - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); + if (input.TemplateName !== undefined) { + entries["TemplateName"] = input.TemplateName; + } + if (input.SourceConfiguration !== undefined) { + const memberEntries = serializeAws_querySourceConfiguration(input.SourceConfiguration, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; + const loc = `SourceConfiguration.${key}`; entries[loc] = value; }); } if (input.ApplicationName !== undefined) { entries["ApplicationName"] = input.ApplicationName; } - if (input.Description !== undefined) { - entries["Description"] = input.Description; - } - if (input.EnvironmentId !== undefined) { - entries["EnvironmentId"] = input.EnvironmentId; - } - if (input.SolutionStackName !== undefined) { - entries["SolutionStackName"] = input.SolutionStackName; - } - if (input.TemplateName !== undefined) { - 
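// The member reordering above does not change the wire format: nested shapes
// are still flattened into dotted query keys via the `loc = `${prefix}.${key}``
// pattern these serializers use. A rough standalone illustration (the helper
// below is illustrative only, not one of the generated serde functions) of the
// entries produced for a CreateApplicationVersion input with an S3 source bundle:
function flattenS3Location(prefix: string, bucket: string, key: string): Record<string, string> {
  // Mirrors serializeAws_queryS3Location plus the prefixing loop used above.
  return {
    [`${prefix}.S3Key`]: key,
    [`${prefix}.S3Bucket`]: bucket,
  };
}

const createVersionEntries: Record<string, string> = {
  ApplicationName: "my-app",
  VersionLabel: "v1",
  ...flattenS3Location("SourceBundle", "my-bucket", "my-app/v1.zip"),
};
// => { ApplicationName: "my-app", VersionLabel: "v1",
//      "SourceBundle.S3Key": "my-app/v1.zip", "SourceBundle.S3Bucket": "my-bucket" }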
entries["TemplateName"] = input.TemplateName; - } if (input.OptionSettings !== undefined) { const memberEntries = serializeAws_queryConfigurationOptionSettingsList(input.OptionSettings, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4318,28 +4309,50 @@ const serializeAws_queryCreateConfigurationTemplateMessage = ( entries[loc] = value; }); } - if (input.SourceConfiguration !== undefined) { - const memberEntries = serializeAws_querySourceConfiguration(input.SourceConfiguration, context); + if (input.PlatformArn !== undefined) { + entries["PlatformArn"] = input.PlatformArn; + } + if (input.EnvironmentId !== undefined) { + entries["EnvironmentId"] = input.EnvironmentId; + } + if (input.Tags !== undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `SourceConfiguration.${key}`; + const loc = `Tags.${key}`; entries[loc] = value; }); } + if (input.Description !== undefined) { + entries["Description"] = input.Description; + } return entries; }; const serializeAws_queryCreateEnvironmentMessage = (input: CreateEnvironmentMessage, context: __SerdeContext): any => { const entries: any = {}; - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); + if (input.OptionsToRemove !== undefined) { + const memberEntries = serializeAws_queryOptionsSpecifierList(input.OptionsToRemove, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; + const loc = `OptionsToRemove.${key}`; entries[loc] = value; }); } if (input.TemplateName !== undefined) { entries["TemplateName"] = input.TemplateName; } + if (input.CNAMEPrefix !== undefined) { + entries["CNAMEPrefix"] = input.CNAMEPrefix; + } + if (input.EnvironmentName !== undefined) { + entries["EnvironmentName"] = input.EnvironmentName; + } + if (input.Tags !== undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Tags.${key}`; + entries[loc] = value; + }); + } if (input.OptionSettings !== undefined) { const memberEntries = serializeAws_queryConfigurationOptionSettingsList(input.OptionSettings, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4347,6 +4360,9 @@ const serializeAws_queryCreateEnvironmentMessage = (input: CreateEnvironmentMess entries[loc] = value; }); } + if (input.OperationsRole !== undefined) { + entries["OperationsRole"] = input.OperationsRole; + } if (input.Tier !== undefined) { const memberEntries = serializeAws_queryEnvironmentTier(input.Tier, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4354,8 +4370,11 @@ const serializeAws_queryCreateEnvironmentMessage = (input: CreateEnvironmentMess entries[loc] = value; }); } - if (input.OperationsRole !== undefined) { - entries["OperationsRole"] = input.OperationsRole; + if (input.Description !== undefined) { + entries["Description"] = input.Description; + } + if (input.SolutionStackName !== undefined) { + entries["SolutionStackName"] = input.SolutionStackName; } if (input.GroupName !== undefined) { entries["GroupName"] = input.GroupName; @@ -4363,31 +4382,12 @@ const serializeAws_queryCreateEnvironmentMessage = (input: CreateEnvironmentMess if (input.VersionLabel !== undefined) { entries["VersionLabel"] = input.VersionLabel; } - if (input.CNAMEPrefix !== undefined) { - entries["CNAMEPrefix"] = input.CNAMEPrefix; - } if (input.ApplicationName !== 
undefined) { entries["ApplicationName"] = input.ApplicationName; } if (input.PlatformArn !== undefined) { entries["PlatformArn"] = input.PlatformArn; } - if (input.OptionsToRemove !== undefined) { - const memberEntries = serializeAws_queryOptionsSpecifierList(input.OptionsToRemove, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `OptionsToRemove.${key}`; - entries[loc] = value; - }); - } - if (input.SolutionStackName !== undefined) { - entries["SolutionStackName"] = input.SolutionStackName; - } - if (input.Description !== undefined) { - entries["Description"] = input.Description; - } - if (input.EnvironmentName !== undefined) { - entries["EnvironmentName"] = input.EnvironmentName; - } return entries; }; @@ -4396,17 +4396,20 @@ const serializeAws_queryCreatePlatformVersionRequest = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.Tags !== undefined) { - const memberEntries = serializeAws_queryTags(input.Tags, context); + if (input.PlatformDefinitionBundle !== undefined) { + const memberEntries = serializeAws_queryS3Location(input.PlatformDefinitionBundle, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Tags.${key}`; + const loc = `PlatformDefinitionBundle.${key}`; entries[loc] = value; }); } - if (input.PlatformDefinitionBundle !== undefined) { - const memberEntries = serializeAws_queryS3Location(input.PlatformDefinitionBundle, context); + if (input.PlatformName !== undefined) { + entries["PlatformName"] = input.PlatformName; + } + if (input.Tags !== undefined) { + const memberEntries = serializeAws_queryTags(input.Tags, context); Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `PlatformDefinitionBundle.${key}`; + const loc = `Tags.${key}`; entries[loc] = value; }); } @@ -4423,9 +4426,6 @@ const serializeAws_queryCreatePlatformVersionRequest = ( entries[loc] = value; }); } - if (input.PlatformName !== undefined) { - entries["PlatformName"] = input.PlatformName; - } return entries; }; @@ -4445,15 +4445,15 @@ const serializeAws_queryDeleteApplicationVersionMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.DeleteSourceBundle !== undefined) { - entries["DeleteSourceBundle"] = input.DeleteSourceBundle; - } if (input.ApplicationName !== undefined) { entries["ApplicationName"] = input.ApplicationName; } if (input.VersionLabel !== undefined) { entries["VersionLabel"] = input.VersionLabel; } + if (input.DeleteSourceBundle !== undefined) { + entries["DeleteSourceBundle"] = input.DeleteSourceBundle; + } return entries; }; @@ -4462,12 +4462,12 @@ const serializeAws_queryDeleteConfigurationTemplateMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } if (input.TemplateName !== undefined) { entries["TemplateName"] = input.TemplateName; } + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } return entries; }; @@ -4516,22 +4516,22 @@ const serializeAws_queryDescribeApplicationVersionsMessage = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.MaxRecords !== undefined) { + entries["MaxRecords"] = input.MaxRecords; + } if (input.ApplicationName !== undefined) { entries["ApplicationName"] = input.ApplicationName; } - if (input.VersionLabels !== undefined) { + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; + } + if 
(input.VersionLabels !== undefined) { const memberEntries = serializeAws_queryVersionLabelsList(input.VersionLabels, context); Object.entries(memberEntries).forEach(([key, value]) => { const loc = `VersionLabels.${key}`; entries[loc] = value; }); } - if (input.MaxRecords !== undefined) { - entries["MaxRecords"] = input.MaxRecords; - } - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; - } return entries; }; @@ -4540,12 +4540,14 @@ const serializeAws_queryDescribeConfigurationOptionsMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.Options !== undefined) { - const memberEntries = serializeAws_queryOptionsSpecifierList(input.Options, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `Options.${key}`; - entries[loc] = value; - }); + if (input.TemplateName !== undefined) { + entries["TemplateName"] = input.TemplateName; + } + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } + if (input.PlatformArn !== undefined) { + entries["PlatformArn"] = input.PlatformArn; } if (input.EnvironmentName !== undefined) { entries["EnvironmentName"] = input.EnvironmentName; @@ -4553,14 +4555,12 @@ const serializeAws_queryDescribeConfigurationOptionsMessage = ( if (input.SolutionStackName !== undefined) { entries["SolutionStackName"] = input.SolutionStackName; } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } - if (input.TemplateName !== undefined) { - entries["TemplateName"] = input.TemplateName; - } - if (input.PlatformArn !== undefined) { - entries["PlatformArn"] = input.PlatformArn; + if (input.Options !== undefined) { + const memberEntries = serializeAws_queryOptionsSpecifierList(input.Options, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Options.${key}`; + entries[loc] = value; + }); } return entries; }; @@ -4570,14 +4570,14 @@ const serializeAws_queryDescribeConfigurationSettingsMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; + if (input.EnvironmentName !== undefined) { + entries["EnvironmentName"] = input.EnvironmentName; } if (input.TemplateName !== undefined) { entries["TemplateName"] = input.TemplateName; } - if (input.EnvironmentName !== undefined) { - entries["EnvironmentName"] = input.EnvironmentName; + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; } return entries; }; @@ -4587,9 +4587,6 @@ const serializeAws_queryDescribeEnvironmentHealthRequest = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.EnvironmentId !== undefined) { - entries["EnvironmentId"] = input.EnvironmentId; - } if (input.AttributeNames !== undefined) { const memberEntries = serializeAws_queryEnvironmentHealthAttributes(input.AttributeNames, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4597,6 +4594,9 @@ const serializeAws_queryDescribeEnvironmentHealthRequest = ( entries[loc] = value; }); } + if (input.EnvironmentId !== undefined) { + entries["EnvironmentId"] = input.EnvironmentId; + } if (input.EnvironmentName !== undefined) { entries["EnvironmentName"] = input.EnvironmentName; } @@ -4608,17 +4608,17 @@ const serializeAws_queryDescribeEnvironmentManagedActionHistoryRequest = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.MaxItems !== 
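// MaxRecords and NextToken are plain query entries, so paginating
// DescribeApplicationVersions is a simple loop over NextToken. A minimal
// sketch, assuming the generated DescribeApplicationVersionsCommand export of
// @aws-sdk/client-elastic-beanstalk:
import {
  ApplicationVersionDescription,
  DescribeApplicationVersionsCommand,
  ElasticBeanstalkClient,
} from "@aws-sdk/client-elastic-beanstalk";

async function listAllApplicationVersions(
  client: ElasticBeanstalkClient,
  applicationName: string
): Promise<ApplicationVersionDescription[]> {
  const versions: ApplicationVersionDescription[] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new DescribeApplicationVersionsCommand({
        ApplicationName: applicationName,
        MaxRecords: 100,
        NextToken: nextToken,
      })
    );
    versions.push(...(page.ApplicationVersions ?? []));
    nextToken = page.NextToken;
  } while (nextToken !== undefined);
  return versions;
}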
undefined) { + entries["MaxItems"] = input.MaxItems; + } if (input.NextToken !== undefined) { entries["NextToken"] = input.NextToken; } - if (input.EnvironmentId !== undefined) { - entries["EnvironmentId"] = input.EnvironmentId; - } if (input.EnvironmentName !== undefined) { entries["EnvironmentName"] = input.EnvironmentName; } - if (input.MaxItems !== undefined) { - entries["MaxItems"] = input.MaxItems; + if (input.EnvironmentId !== undefined) { + entries["EnvironmentId"] = input.EnvironmentId; } return entries; }; @@ -4628,15 +4628,15 @@ const serializeAws_queryDescribeEnvironmentManagedActionsRequest = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.Status !== undefined) { + entries["Status"] = input.Status; + } if (input.EnvironmentId !== undefined) { entries["EnvironmentId"] = input.EnvironmentId; } if (input.EnvironmentName !== undefined) { entries["EnvironmentName"] = input.EnvironmentName; } - if (input.Status !== undefined) { - entries["Status"] = input.Status; - } return entries; }; @@ -4659,24 +4659,20 @@ const serializeAws_queryDescribeEnvironmentsMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.EnvironmentNames !== undefined) { - const memberEntries = serializeAws_queryEnvironmentNamesList(input.EnvironmentNames, context); - Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `EnvironmentNames.${key}`; - entries[loc] = value; - }); + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; } - if (input.IncludedDeletedBackTo !== undefined) { - entries["IncludedDeletedBackTo"] = input.IncludedDeletedBackTo.toISOString().split(".")[0] + "Z"; + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; } if (input.VersionLabel !== undefined) { entries["VersionLabel"] = input.VersionLabel; } - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; + if (input.MaxRecords !== undefined) { + entries["MaxRecords"] = input.MaxRecords; } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; + if (input.IncludeDeleted !== undefined) { + entries["IncludeDeleted"] = input.IncludeDeleted; } if (input.EnvironmentIds !== undefined) { const memberEntries = serializeAws_queryEnvironmentIdList(input.EnvironmentIds, context); @@ -4685,52 +4681,56 @@ const serializeAws_queryDescribeEnvironmentsMessage = ( entries[loc] = value; }); } - if (input.IncludeDeleted !== undefined) { - entries["IncludeDeleted"] = input.IncludeDeleted; + if (input.IncludedDeletedBackTo !== undefined) { + entries["IncludedDeletedBackTo"] = input.IncludedDeletedBackTo.toISOString().split(".")[0] + "Z"; } - if (input.MaxRecords !== undefined) { - entries["MaxRecords"] = input.MaxRecords; + if (input.EnvironmentNames !== undefined) { + const memberEntries = serializeAws_queryEnvironmentNamesList(input.EnvironmentNames, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `EnvironmentNames.${key}`; + entries[loc] = value; + }); } return entries; }; const serializeAws_queryDescribeEventsMessage = (input: DescribeEventsMessage, context: __SerdeContext): any => { const entries: any = {}; - if (input.EndTime !== undefined) { - entries["EndTime"] = input.EndTime.toISOString().split(".")[0] + "Z"; + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; } - if (input.RequestId !== undefined) { - entries["RequestId"] = input.RequestId; + if (input.ApplicationName !== 
undefined) { + entries["ApplicationName"] = input.ApplicationName; } - if (input.TemplateName !== undefined) { - entries["TemplateName"] = input.TemplateName; + if (input.Severity !== undefined) { + entries["Severity"] = input.Severity; } - if (input.MaxRecords !== undefined) { - entries["MaxRecords"] = input.MaxRecords; + if (input.VersionLabel !== undefined) { + entries["VersionLabel"] = input.VersionLabel; } - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; + if (input.StartTime !== undefined) { + entries["StartTime"] = input.StartTime.toISOString().split(".")[0] + "Z"; } if (input.EnvironmentName !== undefined) { entries["EnvironmentName"] = input.EnvironmentName; } - if (input.PlatformArn !== undefined) { - entries["PlatformArn"] = input.PlatformArn; - } - if (input.Severity !== undefined) { - entries["Severity"] = input.Severity; - } if (input.EnvironmentId !== undefined) { entries["EnvironmentId"] = input.EnvironmentId; } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; + if (input.RequestId !== undefined) { + entries["RequestId"] = input.RequestId; } - if (input.StartTime !== undefined) { - entries["StartTime"] = input.StartTime.toISOString().split(".")[0] + "Z"; + if (input.EndTime !== undefined) { + entries["EndTime"] = input.EndTime.toISOString().split(".")[0] + "Z"; } - if (input.VersionLabel !== undefined) { - entries["VersionLabel"] = input.VersionLabel; + if (input.MaxRecords !== undefined) { + entries["MaxRecords"] = input.MaxRecords; + } + if (input.TemplateName !== undefined) { + entries["TemplateName"] = input.TemplateName; + } + if (input.PlatformArn !== undefined) { + entries["PlatformArn"] = input.PlatformArn; } return entries; }; @@ -4750,12 +4750,12 @@ const serializeAws_queryDescribeInstancesHealthRequest = ( if (input.NextToken !== undefined) { entries["NextToken"] = input.NextToken; } - if (input.EnvironmentId !== undefined) { - entries["EnvironmentId"] = input.EnvironmentId; - } if (input.EnvironmentName !== undefined) { entries["EnvironmentName"] = input.EnvironmentName; } + if (input.EnvironmentId !== undefined) { + entries["EnvironmentId"] = input.EnvironmentId; + } return entries; }; @@ -4816,15 +4816,15 @@ const serializeAws_queryEnvironmentNamesList = (input: string[], context: __Serd const serializeAws_queryEnvironmentTier = (input: EnvironmentTier, context: __SerdeContext): any => { const entries: any = {}; + if (input.Version !== undefined) { + entries["Version"] = input.Version; + } if (input.Name !== undefined) { entries["Name"] = input.Name; } if (input.Type !== undefined) { entries["Type"] = input.Type; } - if (input.Version !== undefined) { - entries["Version"] = input.Version; - } return entries; }; @@ -4846,8 +4846,8 @@ const serializeAws_queryListPlatformBranchesRequest = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.NextToken !== undefined) { - entries["NextToken"] = input.NextToken; + if (input.MaxRecords !== undefined) { + entries["MaxRecords"] = input.MaxRecords; } if (input.Filters !== undefined) { const memberEntries = serializeAws_querySearchFilters(input.Filters, context); @@ -4856,8 +4856,8 @@ const serializeAws_queryListPlatformBranchesRequest = ( entries[loc] = value; }); } - if (input.MaxRecords !== undefined) { - entries["MaxRecords"] = input.MaxRecords; + if (input.NextToken !== undefined) { + entries["NextToken"] = input.NextToken; } return entries; }; @@ -4867,9 +4867,6 @@ const 
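// Timestamps in the query protocol are serialized exactly as in the
// StartTime/EndTime handling of serializeAws_queryDescribeEventsMessage above:
// ISO-8601 with the fractional seconds stripped. A small standalone illustration:
const startTime = new Date("2020-06-01T12:34:56.789Z");
const serializedStartTime = startTime.toISOString().split(".")[0] + "Z";
// serializedStartTime === "2020-06-01T12:34:56Z"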
serializeAws_queryListPlatformVersionsRequest = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.MaxRecords !== undefined) { - entries["MaxRecords"] = input.MaxRecords; - } if (input.NextToken !== undefined) { entries["NextToken"] = input.NextToken; } @@ -4880,6 +4877,9 @@ const serializeAws_queryListPlatformVersionsRequest = ( entries[loc] = value; }); } + if (input.MaxRecords !== undefined) { + entries["MaxRecords"] = input.MaxRecords; + } return entries; }; @@ -4910,29 +4910,29 @@ const serializeAws_queryMaxAgeRule = (input: MaxAgeRule, context: __SerdeContext const serializeAws_queryMaxCountRule = (input: MaxCountRule, context: __SerdeContext): any => { const entries: any = {}; - if (input.Enabled !== undefined) { - entries["Enabled"] = input.Enabled; - } if (input.MaxCount !== undefined) { entries["MaxCount"] = input.MaxCount; } if (input.DeleteSourceFromS3 !== undefined) { entries["DeleteSourceFromS3"] = input.DeleteSourceFromS3; } + if (input.Enabled !== undefined) { + entries["Enabled"] = input.Enabled; + } return entries; }; const serializeAws_queryOptionSpecification = (input: OptionSpecification, context: __SerdeContext): any => { const entries: any = {}; + if (input.ResourceName !== undefined) { + entries["ResourceName"] = input.ResourceName; + } if (input.OptionName !== undefined) { entries["OptionName"] = input.OptionName; } if (input.Namespace !== undefined) { entries["Namespace"] = input.Namespace; } - if (input.ResourceName !== undefined) { - entries["ResourceName"] = input.ResourceName; - } return entries; }; @@ -4954,9 +4954,6 @@ const serializeAws_queryPlatformFilter = (input: PlatformFilter, context: __Serd if (input.Type !== undefined) { entries["Type"] = input.Type; } - if (input.Operator !== undefined) { - entries["Operator"] = input.Operator; - } if (input.Values !== undefined) { const memberEntries = serializeAws_queryPlatformFilterValueList(input.Values, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -4964,6 +4961,9 @@ const serializeAws_queryPlatformFilter = (input: PlatformFilter, context: __Serd entries[loc] = value; }); } + if (input.Operator !== undefined) { + entries["Operator"] = input.Operator; + } return entries; }; @@ -4995,12 +4995,12 @@ const serializeAws_queryRebuildEnvironmentMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.EnvironmentName !== undefined) { - entries["EnvironmentName"] = input.EnvironmentName; - } if (input.EnvironmentId !== undefined) { entries["EnvironmentId"] = input.EnvironmentId; } + if (input.EnvironmentName !== undefined) { + entries["EnvironmentName"] = input.EnvironmentName; + } return entries; }; @@ -5012,12 +5012,12 @@ const serializeAws_queryRequestEnvironmentInfoMessage = ( if (input.InfoType !== undefined) { entries["InfoType"] = input.InfoType; } - if (input.EnvironmentName !== undefined) { - entries["EnvironmentName"] = input.EnvironmentName; - } if (input.EnvironmentId !== undefined) { entries["EnvironmentId"] = input.EnvironmentId; } + if (input.EnvironmentName !== undefined) { + entries["EnvironmentName"] = input.EnvironmentName; + } return entries; }; @@ -5037,34 +5037,31 @@ const serializeAws_queryRetrieveEnvironmentInfoMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.InfoType !== undefined) { - entries["InfoType"] = input.InfoType; + if (input.EnvironmentId !== undefined) { + entries["EnvironmentId"] = input.EnvironmentId; } if (input.EnvironmentName !== undefined) { 
entries["EnvironmentName"] = input.EnvironmentName; } - if (input.EnvironmentId !== undefined) { - entries["EnvironmentId"] = input.EnvironmentId; + if (input.InfoType !== undefined) { + entries["InfoType"] = input.InfoType; } return entries; }; const serializeAws_queryS3Location = (input: S3Location, context: __SerdeContext): any => { const entries: any = {}; - if (input.S3Bucket !== undefined) { - entries["S3Bucket"] = input.S3Bucket; - } if (input.S3Key !== undefined) { entries["S3Key"] = input.S3Key; } + if (input.S3Bucket !== undefined) { + entries["S3Bucket"] = input.S3Bucket; + } return entries; }; const serializeAws_querySearchFilter = (input: SearchFilter, context: __SerdeContext): any => { const entries: any = {}; - if (input.Operator !== undefined) { - entries["Operator"] = input.Operator; - } if (input.Attribute !== undefined) { entries["Attribute"] = input.Attribute; } @@ -5075,6 +5072,9 @@ const serializeAws_querySearchFilter = (input: SearchFilter, context: __SerdeCon entries[loc] = value; }); } + if (input.Operator !== undefined) { + entries["Operator"] = input.Operator; + } return entries; }; @@ -5103,26 +5103,26 @@ const serializeAws_querySearchFilterValues = (input: string[], context: __SerdeC const serializeAws_querySourceBuildInformation = (input: SourceBuildInformation, context: __SerdeContext): any => { const entries: any = {}; + if (input.SourceType !== undefined) { + entries["SourceType"] = input.SourceType; + } if (input.SourceLocation !== undefined) { entries["SourceLocation"] = input.SourceLocation; } if (input.SourceRepository !== undefined) { entries["SourceRepository"] = input.SourceRepository; } - if (input.SourceType !== undefined) { - entries["SourceType"] = input.SourceType; - } return entries; }; const serializeAws_querySourceConfiguration = (input: SourceConfiguration, context: __SerdeContext): any => { const entries: any = {}; - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } if (input.TemplateName !== undefined) { entries["TemplateName"] = input.TemplateName; } + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } return entries; }; @@ -5131,12 +5131,12 @@ const serializeAws_querySwapEnvironmentCNAMEsMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.SourceEnvironmentName !== undefined) { - entries["SourceEnvironmentName"] = input.SourceEnvironmentName; - } if (input.DestinationEnvironmentName !== undefined) { entries["DestinationEnvironmentName"] = input.DestinationEnvironmentName; } + if (input.SourceEnvironmentName !== undefined) { + entries["SourceEnvironmentName"] = input.SourceEnvironmentName; + } if (input.DestinationEnvironmentId !== undefined) { entries["DestinationEnvironmentId"] = input.DestinationEnvironmentId; } @@ -5148,12 +5148,12 @@ const serializeAws_querySwapEnvironmentCNAMEsMessage = ( const serializeAws_queryTag = (input: Tag, context: __SerdeContext): any => { const entries: any = {}; - if (input.Value !== undefined) { - entries["Value"] = input.Value; - } if (input.Key !== undefined) { entries["Key"] = input.Key; } + if (input.Value !== undefined) { + entries["Value"] = input.Value; + } return entries; }; @@ -5198,29 +5198,29 @@ const serializeAws_queryTerminateEnvironmentMessage = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.TerminateResources !== undefined) { + entries["TerminateResources"] = input.TerminateResources; + } if (input.ForceTerminate !== undefined) { 
entries["ForceTerminate"] = input.ForceTerminate; } - if (input.EnvironmentId !== undefined) { - entries["EnvironmentId"] = input.EnvironmentId; - } if (input.EnvironmentName !== undefined) { entries["EnvironmentName"] = input.EnvironmentName; } - if (input.TerminateResources !== undefined) { - entries["TerminateResources"] = input.TerminateResources; + if (input.EnvironmentId !== undefined) { + entries["EnvironmentId"] = input.EnvironmentId; } return entries; }; const serializeAws_queryUpdateApplicationMessage = (input: UpdateApplicationMessage, context: __SerdeContext): any => { const entries: any = {}; - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } if (input.Description !== undefined) { entries["Description"] = input.Description; } + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } return entries; }; @@ -5229,6 +5229,9 @@ const serializeAws_queryUpdateApplicationResourceLifecycleMessage = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } if (input.ResourceLifecycleConfig !== undefined) { const memberEntries = serializeAws_queryApplicationResourceLifecycleConfig(input.ResourceLifecycleConfig, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5236,9 +5239,6 @@ const serializeAws_queryUpdateApplicationResourceLifecycleMessage = ( entries[loc] = value; }); } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } return entries; }; @@ -5247,15 +5247,15 @@ const serializeAws_queryUpdateApplicationVersionMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.Description !== undefined) { - entries["Description"] = input.Description; - } if (input.VersionLabel !== undefined) { entries["VersionLabel"] = input.VersionLabel; } if (input.ApplicationName !== undefined) { entries["ApplicationName"] = input.ApplicationName; } + if (input.Description !== undefined) { + entries["Description"] = input.Description; + } return entries; }; @@ -5264,9 +5264,6 @@ const serializeAws_queryUpdateConfigurationTemplateMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } if (input.TemplateName !== undefined) { entries["TemplateName"] = input.TemplateName; } @@ -5277,6 +5274,12 @@ const serializeAws_queryUpdateConfigurationTemplateMessage = ( entries[loc] = value; }); } + if (input.Description !== undefined) { + entries["Description"] = input.Description; + } + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } if (input.OptionsToRemove !== undefined) { const memberEntries = serializeAws_queryOptionsSpecifierList(input.OptionsToRemove, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5284,41 +5287,28 @@ const serializeAws_queryUpdateConfigurationTemplateMessage = ( entries[loc] = value; }); } - if (input.Description !== undefined) { - entries["Description"] = input.Description; - } return entries; }; const serializeAws_queryUpdateEnvironmentMessage = (input: UpdateEnvironmentMessage, context: __SerdeContext): any => { const entries: any = {}; - if (input.OptionsToRemove !== undefined) { - const memberEntries = serializeAws_queryOptionsSpecifierList(input.OptionsToRemove, context); - 
Object.entries(memberEntries).forEach(([key, value]) => { - const loc = `OptionsToRemove.${key}`; - entries[loc] = value; - }); - } - if (input.TemplateName !== undefined) { - entries["TemplateName"] = input.TemplateName; - } - if (input.SolutionStackName !== undefined) { - entries["SolutionStackName"] = input.SolutionStackName; - } if (input.GroupName !== undefined) { entries["GroupName"] = input.GroupName; } - if (input.EnvironmentId !== undefined) { - entries["EnvironmentId"] = input.EnvironmentId; + if (input.VersionLabel !== undefined) { + entries["VersionLabel"] = input.VersionLabel; } - if (input.PlatformArn !== undefined) { - entries["PlatformArn"] = input.PlatformArn; + if (input.SolutionStackName !== undefined) { + entries["SolutionStackName"] = input.SolutionStackName; } if (input.Description !== undefined) { entries["Description"] = input.Description; } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; + if (input.EnvironmentName !== undefined) { + entries["EnvironmentName"] = input.EnvironmentName; + } + if (input.EnvironmentId !== undefined) { + entries["EnvironmentId"] = input.EnvironmentId; } if (input.Tier !== undefined) { const memberEntries = serializeAws_queryEnvironmentTier(input.Tier, context); @@ -5327,8 +5317,15 @@ const serializeAws_queryUpdateEnvironmentMessage = (input: UpdateEnvironmentMess entries[loc] = value; }); } - if (input.EnvironmentName !== undefined) { - entries["EnvironmentName"] = input.EnvironmentName; + if (input.OptionsToRemove !== undefined) { + const memberEntries = serializeAws_queryOptionsSpecifierList(input.OptionsToRemove, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `OptionsToRemove.${key}`; + entries[loc] = value; + }); + } + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; } if (input.OptionSettings !== undefined) { const memberEntries = serializeAws_queryConfigurationOptionSettingsList(input.OptionSettings, context); @@ -5337,8 +5334,11 @@ const serializeAws_queryUpdateEnvironmentMessage = (input: UpdateEnvironmentMess entries[loc] = value; }); } - if (input.VersionLabel !== undefined) { - entries["VersionLabel"] = input.VersionLabel; + if (input.TemplateName !== undefined) { + entries["TemplateName"] = input.TemplateName; + } + if (input.PlatformArn !== undefined) { + entries["PlatformArn"] = input.PlatformArn; } return entries; }; @@ -5348,6 +5348,9 @@ const serializeAws_queryUpdateTagsForResourceMessage = ( context: __SerdeContext ): any => { const entries: any = {}; + if (input.ResourceArn !== undefined) { + entries["ResourceArn"] = input.ResourceArn; + } if (input.TagsToRemove !== undefined) { const memberEntries = serializeAws_queryTagKeyList(input.TagsToRemove, context); Object.entries(memberEntries).forEach(([key, value]) => { @@ -5362,9 +5365,6 @@ const serializeAws_queryUpdateTagsForResourceMessage = ( entries[loc] = value; }); } - if (input.ResourceArn !== undefined) { - entries["ResourceArn"] = input.ResourceArn; - } return entries; }; @@ -5373,12 +5373,6 @@ const serializeAws_queryValidateConfigurationSettingsMessage = ( context: __SerdeContext ): any => { const entries: any = {}; - if (input.EnvironmentName !== undefined) { - entries["EnvironmentName"] = input.EnvironmentName; - } - if (input.ApplicationName !== undefined) { - entries["ApplicationName"] = input.ApplicationName; - } if (input.TemplateName !== undefined) { entries["TemplateName"] = input.TemplateName; } @@ -5389,6 +5383,12 @@ 
const serializeAws_queryValidateConfigurationSettingsMessage = ( entries[loc] = value; }); } + if (input.ApplicationName !== undefined) { + entries["ApplicationName"] = input.ApplicationName; + } + if (input.EnvironmentName !== undefined) { + entries["EnvironmentName"] = input.EnvironmentName; + } return entries; }; @@ -5414,20 +5414,32 @@ const serializeAws_queryVersionLabelsList = (input: string[], context: __SerdeCo const deserializeAws_queryApplicationDescription = (output: any, context: __SerdeContext): ApplicationDescription => { let contents: any = { - DateUpdated: undefined, - ApplicationName: undefined, - Versions: undefined, - ResourceLifecycleConfig: undefined, - Description: undefined, ApplicationArn: undefined, + Description: undefined, ConfigurationTemplates: undefined, DateCreated: undefined, + Versions: undefined, + ApplicationName: undefined, + DateUpdated: undefined, + ResourceLifecycleConfig: undefined, }; - if (output["DateUpdated"] !== undefined) { - contents.DateUpdated = new Date(output["DateUpdated"]); + if (output["ApplicationArn"] !== undefined) { + contents.ApplicationArn = output["ApplicationArn"]; } - if (output["ApplicationName"] !== undefined) { - contents.ApplicationName = output["ApplicationName"]; + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; + } + if (output.ConfigurationTemplates === "") { + contents.ConfigurationTemplates = []; + } + if (output["ConfigurationTemplates"] !== undefined && output["ConfigurationTemplates"]["member"] !== undefined) { + contents.ConfigurationTemplates = deserializeAws_queryConfigurationTemplateNamesList( + __getArrayIfSingleItem(output["ConfigurationTemplates"]["member"]), + context + ); + } + if (output["DateCreated"] !== undefined) { + contents.DateCreated = new Date(output["DateCreated"]); } if (output.Versions === "") { contents.Versions = []; @@ -5438,30 +5450,18 @@ const deserializeAws_queryApplicationDescription = (output: any, context: __Serd context ); } + if (output["ApplicationName"] !== undefined) { + contents.ApplicationName = output["ApplicationName"]; + } + if (output["DateUpdated"] !== undefined) { + contents.DateUpdated = new Date(output["DateUpdated"]); + } if (output["ResourceLifecycleConfig"] !== undefined) { contents.ResourceLifecycleConfig = deserializeAws_queryApplicationResourceLifecycleConfig( output["ResourceLifecycleConfig"], context ); } - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; - } - if (output["ApplicationArn"] !== undefined) { - contents.ApplicationArn = output["ApplicationArn"]; - } - if (output.ConfigurationTemplates === "") { - contents.ConfigurationTemplates = []; - } - if (output["ConfigurationTemplates"] !== undefined && output["ConfigurationTemplates"]["member"] !== undefined) { - contents.ConfigurationTemplates = deserializeAws_queryConfigurationTemplateNamesList( - __getArrayIfSingleItem(output["ConfigurationTemplates"]["member"]), - context - ); - } - if (output["DateCreated"] !== undefined) { - contents.DateCreated = new Date(output["DateCreated"]); - } return contents; }; @@ -5506,23 +5506,23 @@ const deserializeAws_queryApplicationDescriptionsMessage = ( const deserializeAws_queryApplicationMetrics = (output: any, context: __SerdeContext): ApplicationMetrics => { let contents: any = { - Latency: undefined, RequestCount: undefined, - StatusCodes: undefined, + Latency: undefined, Duration: undefined, + StatusCodes: undefined, }; - if (output["Latency"] !== undefined) { - contents.Latency 
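// A minimal sketch, assuming the generated ValidateConfigurationSettingsCommand
// export of @aws-sdk/client-elastic-beanstalk, of the reordered
// ValidateConfigurationSettingsMessage: ApplicationName and OptionSettings are
// required, and exactly one of TemplateName or EnvironmentName should be given.
import { ElasticBeanstalkClient, ValidateConfigurationSettingsCommand } from "@aws-sdk/client-elastic-beanstalk";

async function validateSettingsExample(client: ElasticBeanstalkClient): Promise<void> {
  const response = await client.send(
    new ValidateConfigurationSettingsCommand({
      ApplicationName: "my-app",
      EnvironmentName: "my-env", // or TemplateName, but not both
      OptionSettings: [
        {
          Namespace: "aws:autoscaling:asg",
          OptionName: "MaxSize",
          Value: "4",
        },
      ],
    })
  );
  // Each ValidationMessage also carries the Namespace and OptionName it refers to.
  for (const message of response.Messages ?? []) {
    console.log(message.Severity, message.Namespace, message.OptionName, message.Message);
  }
}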
= deserializeAws_queryLatency(output["Latency"], context); - } if (output["RequestCount"] !== undefined) { contents.RequestCount = parseInt(output["RequestCount"]); } - if (output["StatusCodes"] !== undefined) { - contents.StatusCodes = deserializeAws_queryStatusCodes(output["StatusCodes"], context); + if (output["Latency"] !== undefined) { + contents.Latency = deserializeAws_queryLatency(output["Latency"], context); } if (output["Duration"] !== undefined) { contents.Duration = parseInt(output["Duration"]); } + if (output["StatusCodes"] !== undefined) { + contents.StatusCodes = deserializeAws_queryStatusCodes(output["StatusCodes"], context); + } return contents; }; @@ -5531,18 +5531,18 @@ const deserializeAws_queryApplicationResourceLifecycleConfig = ( context: __SerdeContext ): ApplicationResourceLifecycleConfig => { let contents: any = { - ServiceRole: undefined, VersionLifecycleConfig: undefined, + ServiceRole: undefined, }; - if (output["ServiceRole"] !== undefined) { - contents.ServiceRole = output["ServiceRole"]; - } if (output["VersionLifecycleConfig"] !== undefined) { contents.VersionLifecycleConfig = deserializeAws_queryApplicationVersionLifecycleConfig( output["VersionLifecycleConfig"], context ); } + if (output["ServiceRole"] !== undefined) { + contents.ServiceRole = output["ServiceRole"]; + } return contents; }; @@ -5551,18 +5551,18 @@ const deserializeAws_queryApplicationResourceLifecycleDescriptionMessage = ( context: __SerdeContext ): ApplicationResourceLifecycleDescriptionMessage => { let contents: any = { - ApplicationName: undefined, ResourceLifecycleConfig: undefined, + ApplicationName: undefined, }; - if (output["ApplicationName"] !== undefined) { - contents.ApplicationName = output["ApplicationName"]; - } if (output["ResourceLifecycleConfig"] !== undefined) { contents.ResourceLifecycleConfig = deserializeAws_queryApplicationResourceLifecycleConfig( output["ResourceLifecycleConfig"], context ); } + if (output["ApplicationName"] !== undefined) { + contents.ApplicationName = output["ApplicationName"]; + } return contents; }; @@ -5571,25 +5571,22 @@ const deserializeAws_queryApplicationVersionDescription = ( context: __SerdeContext ): ApplicationVersionDescription => { let contents: any = { - ApplicationName: undefined, - SourceBundle: undefined, - DateUpdated: undefined, + BuildArn: undefined, + Description: undefined, SourceBuildInformation: undefined, - VersionLabel: undefined, + DateCreated: undefined, + SourceBundle: undefined, + ApplicationName: undefined, Status: undefined, - Description: undefined, - BuildArn: undefined, ApplicationVersionArn: undefined, - DateCreated: undefined, + VersionLabel: undefined, + DateUpdated: undefined, }; - if (output["ApplicationName"] !== undefined) { - contents.ApplicationName = output["ApplicationName"]; - } - if (output["SourceBundle"] !== undefined) { - contents.SourceBundle = deserializeAws_queryS3Location(output["SourceBundle"], context); + if (output["BuildArn"] !== undefined) { + contents.BuildArn = output["BuildArn"]; } - if (output["DateUpdated"] !== undefined) { - contents.DateUpdated = new Date(output["DateUpdated"]); + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; } if (output["SourceBuildInformation"] !== undefined) { contents.SourceBuildInformation = deserializeAws_querySourceBuildInformation( @@ -5597,23 +5594,26 @@ const deserializeAws_queryApplicationVersionDescription = ( context ); } - if (output["VersionLabel"] !== undefined) { - contents.VersionLabel = 
output["VersionLabel"]; + if (output["DateCreated"] !== undefined) { + contents.DateCreated = new Date(output["DateCreated"]); } - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; + if (output["SourceBundle"] !== undefined) { + contents.SourceBundle = deserializeAws_queryS3Location(output["SourceBundle"], context); } - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; + if (output["ApplicationName"] !== undefined) { + contents.ApplicationName = output["ApplicationName"]; } - if (output["BuildArn"] !== undefined) { - contents.BuildArn = output["BuildArn"]; + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; } if (output["ApplicationVersionArn"] !== undefined) { contents.ApplicationVersionArn = output["ApplicationVersionArn"]; } - if (output["DateCreated"] !== undefined) { - contents.DateCreated = new Date(output["DateCreated"]); + if (output["VersionLabel"] !== undefined) { + contents.VersionLabel = output["VersionLabel"]; + } + if (output["DateUpdated"] !== undefined) { + contents.DateUpdated = new Date(output["DateUpdated"]); } return contents; }; @@ -5646,9 +5646,12 @@ const deserializeAws_queryApplicationVersionDescriptionsMessage = ( context: __SerdeContext ): ApplicationVersionDescriptionsMessage => { let contents: any = { - ApplicationVersions: undefined, NextToken: undefined, + ApplicationVersions: undefined, }; + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } if (output.ApplicationVersions === "") { contents.ApplicationVersions = []; } @@ -5658,9 +5661,6 @@ const deserializeAws_queryApplicationVersionDescriptionsMessage = ( context ); } - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } return contents; }; @@ -5669,15 +5669,15 @@ const deserializeAws_queryApplicationVersionLifecycleConfig = ( context: __SerdeContext ): ApplicationVersionLifecycleConfig => { let contents: any = { - MaxCountRule: undefined, MaxAgeRule: undefined, + MaxCountRule: undefined, }; - if (output["MaxCountRule"] !== undefined) { - contents.MaxCountRule = deserializeAws_queryMaxCountRule(output["MaxCountRule"], context); - } if (output["MaxAgeRule"] !== undefined) { contents.MaxAgeRule = deserializeAws_queryMaxAgeRule(output["MaxAgeRule"], context); } + if (output["MaxCountRule"] !== undefined) { + contents.MaxCountRule = deserializeAws_queryMaxCountRule(output["MaxCountRule"], context); + } return contents; }; @@ -5686,22 +5686,22 @@ const deserializeAws_queryApplyEnvironmentManagedActionResult = ( context: __SerdeContext ): ApplyEnvironmentManagedActionResult => { let contents: any = { + ActionType: undefined, ActionDescription: undefined, - Status: undefined, ActionId: undefined, - ActionType: undefined, + Status: undefined, }; + if (output["ActionType"] !== undefined) { + contents.ActionType = output["ActionType"]; + } if (output["ActionDescription"] !== undefined) { contents.ActionDescription = output["ActionDescription"]; } - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; - } if (output["ActionId"] !== undefined) { contents.ActionId = output["ActionId"]; } - if (output["ActionType"] !== undefined) { - contents.ActionType = output["ActionType"]; + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; } return contents; }; @@ -5750,15 +5750,15 @@ const deserializeAws_queryCheckDNSAvailabilityResultMessage = ( context: __SerdeContext ): CheckDNSAvailabilityResultMessage => { let 
contents: any = { - Available: undefined, FullyQualifiedCNAME: undefined, + Available: undefined, }; - if (output["Available"] !== undefined) { - contents.Available = output["Available"] == "true"; - } if (output["FullyQualifiedCNAME"] !== undefined) { contents.FullyQualifiedCNAME = output["FullyQualifiedCNAME"]; } + if (output["Available"] !== undefined) { + contents.Available = output["Available"] == "true"; + } return contents; }; @@ -5780,44 +5780,32 @@ const deserializeAws_queryConfigurationOptionDescription = ( context: __SerdeContext ): ConfigurationOptionDescription => { let contents: any = { - DefaultValue: undefined, - ChangeSeverity: undefined, - MaxLength: undefined, - UserDefined: undefined, - Name: undefined, Namespace: undefined, - MaxValue: undefined, - MinValue: undefined, Regex: undefined, + MaxLength: undefined, + MinValue: undefined, + Name: undefined, ValueOptions: undefined, + ChangeSeverity: undefined, ValueType: undefined, + DefaultValue: undefined, + UserDefined: undefined, + MaxValue: undefined, }; - if (output["DefaultValue"] !== undefined) { - contents.DefaultValue = output["DefaultValue"]; + if (output["Namespace"] !== undefined) { + contents.Namespace = output["Namespace"]; } - if (output["ChangeSeverity"] !== undefined) { - contents.ChangeSeverity = output["ChangeSeverity"]; + if (output["Regex"] !== undefined) { + contents.Regex = deserializeAws_queryOptionRestrictionRegex(output["Regex"], context); } if (output["MaxLength"] !== undefined) { contents.MaxLength = parseInt(output["MaxLength"]); } - if (output["UserDefined"] !== undefined) { - contents.UserDefined = output["UserDefined"] == "true"; - } - if (output["Name"] !== undefined) { - contents.Name = output["Name"]; - } - if (output["Namespace"] !== undefined) { - contents.Namespace = output["Namespace"]; - } - if (output["MaxValue"] !== undefined) { - contents.MaxValue = parseInt(output["MaxValue"]); - } if (output["MinValue"] !== undefined) { contents.MinValue = parseInt(output["MinValue"]); } - if (output["Regex"] !== undefined) { - contents.Regex = deserializeAws_queryOptionRestrictionRegex(output["Regex"], context); + if (output["Name"] !== undefined) { + contents.Name = output["Name"]; } if (output.ValueOptions === "") { contents.ValueOptions = []; @@ -5828,9 +5816,21 @@ const deserializeAws_queryConfigurationOptionDescription = ( context ); } + if (output["ChangeSeverity"] !== undefined) { + contents.ChangeSeverity = output["ChangeSeverity"]; + } if (output["ValueType"] !== undefined) { contents.ValueType = output["ValueType"]; } + if (output["DefaultValue"] !== undefined) { + contents.DefaultValue = output["DefaultValue"]; + } + if (output["UserDefined"] !== undefined) { + contents.UserDefined = output["UserDefined"] == "true"; + } + if (output["MaxValue"] !== undefined) { + contents.MaxValue = parseInt(output["MaxValue"]); + } return contents; }; @@ -5850,13 +5850,10 @@ const deserializeAws_queryConfigurationOptionsDescription = ( context: __SerdeContext ): ConfigurationOptionsDescription => { let contents: any = { - PlatformArn: undefined, SolutionStackName: undefined, Options: undefined, + PlatformArn: undefined, }; - if (output["PlatformArn"] !== undefined) { - contents.PlatformArn = output["PlatformArn"]; - } if (output["SolutionStackName"] !== undefined) { contents.SolutionStackName = output["SolutionStackName"]; } @@ -5869,6 +5866,9 @@ const deserializeAws_queryConfigurationOptionsDescription = ( context ); } + if (output["PlatformArn"] !== undefined) { + contents.PlatformArn = 
output["PlatformArn"]; + } return contents; }; @@ -5877,23 +5877,23 @@ const deserializeAws_queryConfigurationOptionSetting = ( context: __SerdeContext ): ConfigurationOptionSetting => { let contents: any = { - OptionName: undefined, - ResourceName: undefined, Value: undefined, Namespace: undefined, + OptionName: undefined, + ResourceName: undefined, }; - if (output["OptionName"] !== undefined) { - contents.OptionName = output["OptionName"]; - } - if (output["ResourceName"] !== undefined) { - contents.ResourceName = output["ResourceName"]; - } if (output["Value"] !== undefined) { contents.Value = output["Value"]; } if (output["Namespace"] !== undefined) { contents.Namespace = output["Namespace"]; } + if (output["OptionName"] !== undefined) { + contents.OptionName = output["OptionName"]; + } + if (output["ResourceName"] !== undefined) { + contents.ResourceName = output["ResourceName"]; + } return contents; }; @@ -5909,29 +5909,17 @@ const deserializeAws_queryConfigurationSettingsDescription = ( context: __SerdeContext ): ConfigurationSettingsDescription => { let contents: any = { - EnvironmentName: undefined, + OptionSettings: undefined, PlatformArn: undefined, + DeploymentStatus: undefined, + TemplateName: undefined, ApplicationName: undefined, - DateUpdated: undefined, - OptionSettings: undefined, - SolutionStackName: undefined, - Description: undefined, DateCreated: undefined, - TemplateName: undefined, - DeploymentStatus: undefined, + Description: undefined, + SolutionStackName: undefined, + DateUpdated: undefined, + EnvironmentName: undefined, }; - if (output["EnvironmentName"] !== undefined) { - contents.EnvironmentName = output["EnvironmentName"]; - } - if (output["PlatformArn"] !== undefined) { - contents.PlatformArn = output["PlatformArn"]; - } - if (output["ApplicationName"] !== undefined) { - contents.ApplicationName = output["ApplicationName"]; - } - if (output["DateUpdated"] !== undefined) { - contents.DateUpdated = new Date(output["DateUpdated"]); - } if (output.OptionSettings === "") { contents.OptionSettings = []; } @@ -5941,20 +5929,32 @@ const deserializeAws_queryConfigurationSettingsDescription = ( context ); } - if (output["SolutionStackName"] !== undefined) { - contents.SolutionStackName = output["SolutionStackName"]; + if (output["PlatformArn"] !== undefined) { + contents.PlatformArn = output["PlatformArn"]; } - if (output["Description"] !== undefined) { - contents.Description = output["Description"]; + if (output["DeploymentStatus"] !== undefined) { + contents.DeploymentStatus = output["DeploymentStatus"]; + } + if (output["TemplateName"] !== undefined) { + contents.TemplateName = output["TemplateName"]; + } + if (output["ApplicationName"] !== undefined) { + contents.ApplicationName = output["ApplicationName"]; } if (output["DateCreated"] !== undefined) { contents.DateCreated = new Date(output["DateCreated"]); } - if (output["TemplateName"] !== undefined) { - contents.TemplateName = output["TemplateName"]; + if (output["Description"] !== undefined) { + contents.Description = output["Description"]; } - if (output["DeploymentStatus"] !== undefined) { - contents.DeploymentStatus = output["DeploymentStatus"]; + if (output["SolutionStackName"] !== undefined) { + contents.SolutionStackName = output["SolutionStackName"]; + } + if (output["DateUpdated"] !== undefined) { + contents.DateUpdated = new Date(output["DateUpdated"]); + } + if (output["EnvironmentName"] !== undefined) { + contents.EnvironmentName = output["EnvironmentName"]; } return contents; }; @@ -6010,20 +6010,20 
@@ const deserializeAws_queryConfigurationTemplateNamesList = (output: any, context const deserializeAws_queryCPUUtilization = (output: any, context: __SerdeContext): CPUUtilization => { let contents: any = { - User: undefined, - Idle: undefined, + Nice: undefined, + SoftIRQ: undefined, IRQ: undefined, Privileged: undefined, - SoftIRQ: undefined, - IOWait: undefined, + Idle: undefined, + User: undefined, System: undefined, - Nice: undefined, + IOWait: undefined, }; - if (output["User"] !== undefined) { - contents.User = parseFloat(output["User"]); + if (output["Nice"] !== undefined) { + contents.Nice = parseFloat(output["Nice"]); } - if (output["Idle"] !== undefined) { - contents.Idle = parseFloat(output["Idle"]); + if (output["SoftIRQ"] !== undefined) { + contents.SoftIRQ = parseFloat(output["SoftIRQ"]); } if (output["IRQ"] !== undefined) { contents.IRQ = parseFloat(output["IRQ"]); @@ -6031,17 +6031,17 @@ const deserializeAws_queryCPUUtilization = (output: any, context: __SerdeContext if (output["Privileged"] !== undefined) { contents.Privileged = parseFloat(output["Privileged"]); } - if (output["SoftIRQ"] !== undefined) { - contents.SoftIRQ = parseFloat(output["SoftIRQ"]); + if (output["Idle"] !== undefined) { + contents.Idle = parseFloat(output["Idle"]); } - if (output["IOWait"] !== undefined) { - contents.IOWait = parseFloat(output["IOWait"]); + if (output["User"] !== undefined) { + contents.User = parseFloat(output["User"]); } if (output["System"] !== undefined) { contents.System = parseFloat(output["System"]); } - if (output["Nice"] !== undefined) { - contents.Nice = parseFloat(output["Nice"]); + if (output["IOWait"] !== undefined) { + contents.IOWait = parseFloat(output["IOWait"]); } return contents; }; @@ -6051,15 +6051,15 @@ const deserializeAws_queryCreatePlatformVersionResult = ( context: __SerdeContext ): CreatePlatformVersionResult => { let contents: any = { - Builder: undefined, PlatformSummary: undefined, + Builder: undefined, }; - if (output["Builder"] !== undefined) { - contents.Builder = deserializeAws_queryBuilder(output["Builder"], context); - } if (output["PlatformSummary"] !== undefined) { contents.PlatformSummary = deserializeAws_queryPlatformSummary(output["PlatformSummary"], context); } + if (output["Builder"] !== undefined) { + contents.Builder = deserializeAws_queryBuilder(output["Builder"], context); + } return contents; }; @@ -6078,15 +6078,15 @@ const deserializeAws_queryCreateStorageLocationResultMessage = ( const deserializeAws_queryCustomAmi = (output: any, context: __SerdeContext): CustomAmi => { let contents: any = { - VirtualizationType: undefined, ImageId: undefined, + VirtualizationType: undefined, }; - if (output["VirtualizationType"] !== undefined) { - contents.VirtualizationType = output["VirtualizationType"]; - } if (output["ImageId"] !== undefined) { contents.ImageId = output["ImageId"]; } + if (output["VirtualizationType"] !== undefined) { + contents.VirtualizationType = output["VirtualizationType"]; + } return contents; }; @@ -6110,22 +6110,22 @@ const deserializeAws_queryDeletePlatformVersionResult = ( const deserializeAws_queryDeployment = (output: any, context: __SerdeContext): Deployment => { let contents: any = { VersionLabel: undefined, - DeploymentId: undefined, Status: undefined, DeploymentTime: undefined, + DeploymentId: undefined, }; if (output["VersionLabel"] !== undefined) { contents.VersionLabel = output["VersionLabel"]; } - if (output["DeploymentId"] !== undefined) { - contents.DeploymentId = parseInt(output["DeploymentId"]); - } 
if (output["Status"] !== undefined) { contents.Status = output["Status"]; } if (output["DeploymentTime"] !== undefined) { contents.DeploymentTime = new Date(output["DeploymentTime"]); } + if (output["DeploymentId"] !== undefined) { + contents.DeploymentId = parseInt(output["DeploymentId"]); + } return contents; }; @@ -6147,41 +6147,41 @@ const deserializeAws_queryDescribeEnvironmentHealthResult = ( context: __SerdeContext ): DescribeEnvironmentHealthResult => { let contents: any = { + HealthStatus: undefined, + Status: undefined, + Color: undefined, EnvironmentName: undefined, ApplicationMetrics: undefined, + Causes: undefined, InstancesHealth: undefined, RefreshedAt: undefined, - Causes: undefined, - Color: undefined, - Status: undefined, - HealthStatus: undefined, }; + if (output["HealthStatus"] !== undefined) { + contents.HealthStatus = output["HealthStatus"]; + } + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; + } + if (output["Color"] !== undefined) { + contents.Color = output["Color"]; + } if (output["EnvironmentName"] !== undefined) { contents.EnvironmentName = output["EnvironmentName"]; } if (output["ApplicationMetrics"] !== undefined) { contents.ApplicationMetrics = deserializeAws_queryApplicationMetrics(output["ApplicationMetrics"], context); } - if (output["InstancesHealth"] !== undefined) { - contents.InstancesHealth = deserializeAws_queryInstanceHealthSummary(output["InstancesHealth"], context); - } - if (output["RefreshedAt"] !== undefined) { - contents.RefreshedAt = new Date(output["RefreshedAt"]); - } if (output.Causes === "") { contents.Causes = []; } if (output["Causes"] !== undefined && output["Causes"]["member"] !== undefined) { contents.Causes = deserializeAws_queryCauses(__getArrayIfSingleItem(output["Causes"]["member"]), context); } - if (output["Color"] !== undefined) { - contents.Color = output["Color"]; - } - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; + if (output["InstancesHealth"] !== undefined) { + contents.InstancesHealth = deserializeAws_queryInstanceHealthSummary(output["InstancesHealth"], context); } - if (output["HealthStatus"] !== undefined) { - contents.HealthStatus = output["HealthStatus"]; + if (output["RefreshedAt"] !== undefined) { + contents.RefreshedAt = new Date(output["RefreshedAt"]); } return contents; }; @@ -6236,10 +6236,16 @@ const deserializeAws_queryDescribeInstancesHealthResult = ( context: __SerdeContext ): DescribeInstancesHealthResult => { let contents: any = { - InstanceHealthList: undefined, - RefreshedAt: undefined, NextToken: undefined, + RefreshedAt: undefined, + InstanceHealthList: undefined, }; + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } + if (output["RefreshedAt"] !== undefined) { + contents.RefreshedAt = new Date(output["RefreshedAt"]); + } if (output.InstanceHealthList === "") { contents.InstanceHealthList = []; } @@ -6249,12 +6255,6 @@ const deserializeAws_queryDescribeInstancesHealthResult = ( context ); } - if (output["RefreshedAt"] !== undefined) { - contents.RefreshedAt = new Date(output["RefreshedAt"]); - } - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } return contents; }; @@ -6286,57 +6286,36 @@ const deserializeAws_queryElasticBeanstalkServiceException = ( const deserializeAws_queryEnvironmentDescription = (output: any, context: __SerdeContext): EnvironmentDescription => { let contents: any = { - AbortableOperationInProgress: undefined, - TemplateName: undefined, - 
HealthStatus: undefined, - EnvironmentLinks: undefined, - Resources: undefined, - Health: undefined, - VersionLabel: undefined, - SolutionStackName: undefined, + EndpointURL: undefined, + ApplicationName: undefined, + PlatformArn: undefined, DateUpdated: undefined, EnvironmentName: undefined, + DateCreated: undefined, Description: undefined, EnvironmentArn: undefined, - Status: undefined, - ApplicationName: undefined, - CNAME: undefined, - PlatformArn: undefined, - EndpointURL: undefined, - DateCreated: undefined, + VersionLabel: undefined, + Health: undefined, + HealthStatus: undefined, + EnvironmentId: undefined, + AbortableOperationInProgress: undefined, + SolutionStackName: undefined, Tier: undefined, + CNAME: undefined, OperationsRole: undefined, - EnvironmentId: undefined, + TemplateName: undefined, + Resources: undefined, + EnvironmentLinks: undefined, + Status: undefined, }; - if (output["AbortableOperationInProgress"] !== undefined) { - contents.AbortableOperationInProgress = output["AbortableOperationInProgress"] == "true"; - } - if (output["TemplateName"] !== undefined) { - contents.TemplateName = output["TemplateName"]; - } - if (output["HealthStatus"] !== undefined) { - contents.HealthStatus = output["HealthStatus"]; - } - if (output.EnvironmentLinks === "") { - contents.EnvironmentLinks = []; - } - if (output["EnvironmentLinks"] !== undefined && output["EnvironmentLinks"]["member"] !== undefined) { - contents.EnvironmentLinks = deserializeAws_queryEnvironmentLinks( - __getArrayIfSingleItem(output["EnvironmentLinks"]["member"]), - context - ); - } - if (output["Resources"] !== undefined) { - contents.Resources = deserializeAws_queryEnvironmentResourcesDescription(output["Resources"], context); - } - if (output["Health"] !== undefined) { - contents.Health = output["Health"]; + if (output["EndpointURL"] !== undefined) { + contents.EndpointURL = output["EndpointURL"]; } - if (output["VersionLabel"] !== undefined) { - contents.VersionLabel = output["VersionLabel"]; + if (output["ApplicationName"] !== undefined) { + contents.ApplicationName = output["ApplicationName"]; } - if (output["SolutionStackName"] !== undefined) { - contents.SolutionStackName = output["SolutionStackName"]; + if (output["PlatformArn"] !== undefined) { + contents.PlatformArn = output["PlatformArn"]; } if (output["DateUpdated"] !== undefined) { contents.DateUpdated = new Date(output["DateUpdated"]); @@ -6344,38 +6323,59 @@ const deserializeAws_queryEnvironmentDescription = (output: any, context: __Serd if (output["EnvironmentName"] !== undefined) { contents.EnvironmentName = output["EnvironmentName"]; } + if (output["DateCreated"] !== undefined) { + contents.DateCreated = new Date(output["DateCreated"]); + } if (output["Description"] !== undefined) { contents.Description = output["Description"]; } if (output["EnvironmentArn"] !== undefined) { contents.EnvironmentArn = output["EnvironmentArn"]; } - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; + if (output["VersionLabel"] !== undefined) { + contents.VersionLabel = output["VersionLabel"]; } - if (output["ApplicationName"] !== undefined) { - contents.ApplicationName = output["ApplicationName"]; + if (output["Health"] !== undefined) { + contents.Health = output["Health"]; } - if (output["CNAME"] !== undefined) { - contents.CNAME = output["CNAME"]; + if (output["HealthStatus"] !== undefined) { + contents.HealthStatus = output["HealthStatus"]; } - if (output["PlatformArn"] !== undefined) { - contents.PlatformArn = output["PlatformArn"]; 
+ if (output["EnvironmentId"] !== undefined) { + contents.EnvironmentId = output["EnvironmentId"]; } - if (output["EndpointURL"] !== undefined) { - contents.EndpointURL = output["EndpointURL"]; + if (output["AbortableOperationInProgress"] !== undefined) { + contents.AbortableOperationInProgress = output["AbortableOperationInProgress"] == "true"; } - if (output["DateCreated"] !== undefined) { - contents.DateCreated = new Date(output["DateCreated"]); + if (output["SolutionStackName"] !== undefined) { + contents.SolutionStackName = output["SolutionStackName"]; } if (output["Tier"] !== undefined) { contents.Tier = deserializeAws_queryEnvironmentTier(output["Tier"], context); } + if (output["CNAME"] !== undefined) { + contents.CNAME = output["CNAME"]; + } if (output["OperationsRole"] !== undefined) { contents.OperationsRole = output["OperationsRole"]; } - if (output["EnvironmentId"] !== undefined) { - contents.EnvironmentId = output["EnvironmentId"]; + if (output["TemplateName"] !== undefined) { + contents.TemplateName = output["TemplateName"]; + } + if (output["Resources"] !== undefined) { + contents.Resources = deserializeAws_queryEnvironmentResourcesDescription(output["Resources"], context); + } + if (output.EnvironmentLinks === "") { + contents.EnvironmentLinks = []; + } + if (output["EnvironmentLinks"] !== undefined && output["EnvironmentLinks"]["member"] !== undefined) { + contents.EnvironmentLinks = deserializeAws_queryEnvironmentLinks( + __getArrayIfSingleItem(output["EnvironmentLinks"]["member"]), + context + ); + } + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; } return contents; }; @@ -6392,9 +6392,12 @@ const deserializeAws_queryEnvironmentDescriptionsMessage = ( context: __SerdeContext ): EnvironmentDescriptionsMessage => { let contents: any = { - Environments: undefined, NextToken: undefined, + Environments: undefined, }; + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } if (output.Environments === "") { contents.Environments = []; } @@ -6404,9 +6407,6 @@ const deserializeAws_queryEnvironmentDescriptionsMessage = ( context ); } - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } return contents; }; @@ -6465,21 +6465,27 @@ const deserializeAws_queryEnvironmentResourceDescription = ( context: __SerdeContext ): EnvironmentResourceDescription => { let contents: any = { - Instances: undefined, - AutoScalingGroups: undefined, - EnvironmentName: undefined, - Triggers: undefined, Queues: undefined, - LoadBalancers: undefined, LaunchTemplates: undefined, + AutoScalingGroups: undefined, + LoadBalancers: undefined, + Instances: undefined, LaunchConfigurations: undefined, + EnvironmentName: undefined, + Triggers: undefined, }; - if (output.Instances === "") { - contents.Instances = []; + if (output.Queues === "") { + contents.Queues = []; } - if (output["Instances"] !== undefined && output["Instances"]["member"] !== undefined) { - contents.Instances = deserializeAws_queryInstanceList( - __getArrayIfSingleItem(output["Instances"]["member"]), + if (output["Queues"] !== undefined && output["Queues"]["member"] !== undefined) { + contents.Queues = deserializeAws_queryQueueList(__getArrayIfSingleItem(output["Queues"]["member"]), context); + } + if (output.LaunchTemplates === "") { + contents.LaunchTemplates = []; + } + if (output["LaunchTemplates"] !== undefined && output["LaunchTemplates"]["member"] !== undefined) { + contents.LaunchTemplates = deserializeAws_queryLaunchTemplateList( + 
__getArrayIfSingleItem(output["LaunchTemplates"]["member"]), context ); } @@ -6492,21 +6498,6 @@ const deserializeAws_queryEnvironmentResourceDescription = ( context ); } - if (output["EnvironmentName"] !== undefined) { - contents.EnvironmentName = output["EnvironmentName"]; - } - if (output.Triggers === "") { - contents.Triggers = []; - } - if (output["Triggers"] !== undefined && output["Triggers"]["member"] !== undefined) { - contents.Triggers = deserializeAws_queryTriggerList(__getArrayIfSingleItem(output["Triggers"]["member"]), context); - } - if (output.Queues === "") { - contents.Queues = []; - } - if (output["Queues"] !== undefined && output["Queues"]["member"] !== undefined) { - contents.Queues = deserializeAws_queryQueueList(__getArrayIfSingleItem(output["Queues"]["member"]), context); - } if (output.LoadBalancers === "") { contents.LoadBalancers = []; } @@ -6516,12 +6507,12 @@ const deserializeAws_queryEnvironmentResourceDescription = ( context ); } - if (output.LaunchTemplates === "") { - contents.LaunchTemplates = []; + if (output.Instances === "") { + contents.Instances = []; } - if (output["LaunchTemplates"] !== undefined && output["LaunchTemplates"]["member"] !== undefined) { - contents.LaunchTemplates = deserializeAws_queryLaunchTemplateList( - __getArrayIfSingleItem(output["LaunchTemplates"]["member"]), + if (output["Instances"] !== undefined && output["Instances"]["member"] !== undefined) { + contents.Instances = deserializeAws_queryInstanceList( + __getArrayIfSingleItem(output["Instances"]["member"]), context ); } @@ -6534,6 +6525,15 @@ const deserializeAws_queryEnvironmentResourceDescription = ( context ); } + if (output["EnvironmentName"] !== undefined) { + contents.EnvironmentName = output["EnvironmentName"]; + } + if (output.Triggers === "") { + contents.Triggers = []; + } + if (output["Triggers"] !== undefined && output["Triggers"]["member"] !== undefined) { + contents.Triggers = deserializeAws_queryTriggerList(__getArrayIfSingleItem(output["Triggers"]["member"]), context); + } return contents; }; @@ -6568,61 +6568,61 @@ const deserializeAws_queryEnvironmentResourcesDescription = ( const deserializeAws_queryEnvironmentTier = (output: any, context: __SerdeContext): EnvironmentTier => { let contents: any = { + Version: undefined, Name: undefined, Type: undefined, - Version: undefined, }; + if (output["Version"] !== undefined) { + contents.Version = output["Version"]; + } if (output["Name"] !== undefined) { contents.Name = output["Name"]; } if (output["Type"] !== undefined) { contents.Type = output["Type"]; } - if (output["Version"] !== undefined) { - contents.Version = output["Version"]; - } return contents; }; const deserializeAws_queryEventDescription = (output: any, context: __SerdeContext): EventDescription => { let contents: any = { - TemplateName: undefined, - RequestId: undefined, - EventDate: undefined, ApplicationName: undefined, - Severity: undefined, - VersionLabel: undefined, - PlatformArn: undefined, EnvironmentName: undefined, + EventDate: undefined, + PlatformArn: undefined, Message: undefined, + TemplateName: undefined, + Severity: undefined, + RequestId: undefined, + VersionLabel: undefined, }; - if (output["TemplateName"] !== undefined) { - contents.TemplateName = output["TemplateName"]; + if (output["ApplicationName"] !== undefined) { + contents.ApplicationName = output["ApplicationName"]; } - if (output["RequestId"] !== undefined) { - contents.RequestId = output["RequestId"]; + if (output["EnvironmentName"] !== undefined) { + 
contents.EnvironmentName = output["EnvironmentName"]; } if (output["EventDate"] !== undefined) { contents.EventDate = new Date(output["EventDate"]); } - if (output["ApplicationName"] !== undefined) { - contents.ApplicationName = output["ApplicationName"]; + if (output["PlatformArn"] !== undefined) { + contents.PlatformArn = output["PlatformArn"]; + } + if (output["Message"] !== undefined) { + contents.Message = output["Message"]; + } + if (output["TemplateName"] !== undefined) { + contents.TemplateName = output["TemplateName"]; } if (output["Severity"] !== undefined) { contents.Severity = output["Severity"]; } + if (output["RequestId"] !== undefined) { + contents.RequestId = output["RequestId"]; + } if (output["VersionLabel"] !== undefined) { contents.VersionLabel = output["VersionLabel"]; } - if (output["PlatformArn"] !== undefined) { - contents.PlatformArn = output["PlatformArn"]; - } - if (output["EnvironmentName"] !== undefined) { - contents.EnvironmentName = output["EnvironmentName"]; - } - if (output["Message"] !== undefined) { - contents.Message = output["Message"]; - } return contents; }; @@ -6669,38 +6669,38 @@ const deserializeAws_queryInstanceHealthList = (output: any, context: __SerdeCon const deserializeAws_queryInstanceHealthSummary = (output: any, context: __SerdeContext): InstanceHealthSummary => { let contents: any = { - Degraded: undefined, + Ok: undefined, + Severe: undefined, + Pending: undefined, NoData: undefined, - Unknown: undefined, Info: undefined, - Pending: undefined, + Degraded: undefined, Warning: undefined, - Ok: undefined, - Severe: undefined, + Unknown: undefined, }; - if (output["Degraded"] !== undefined) { - contents.Degraded = parseInt(output["Degraded"]); + if (output["Ok"] !== undefined) { + contents.Ok = parseInt(output["Ok"]); + } + if (output["Severe"] !== undefined) { + contents.Severe = parseInt(output["Severe"]); + } + if (output["Pending"] !== undefined) { + contents.Pending = parseInt(output["Pending"]); } if (output["NoData"] !== undefined) { contents.NoData = parseInt(output["NoData"]); } - if (output["Unknown"] !== undefined) { - contents.Unknown = parseInt(output["Unknown"]); - } if (output["Info"] !== undefined) { contents.Info = parseInt(output["Info"]); } - if (output["Pending"] !== undefined) { - contents.Pending = parseInt(output["Pending"]); + if (output["Degraded"] !== undefined) { + contents.Degraded = parseInt(output["Degraded"]); } if (output["Warning"] !== undefined) { contents.Warning = parseInt(output["Warning"]); } - if (output["Ok"] !== undefined) { - contents.Ok = parseInt(output["Ok"]); - } - if (output["Severe"] !== undefined) { - contents.Severe = parseInt(output["Severe"]); + if (output["Unknown"] !== undefined) { + contents.Unknown = parseInt(output["Unknown"]); } return contents; }; @@ -6734,38 +6734,38 @@ const deserializeAws_queryInvalidRequestException = (output: any, context: __Ser const deserializeAws_queryLatency = (output: any, context: __SerdeContext): Latency => { let contents: any = { + P99: undefined, + P90: undefined, P50: undefined, - P95: undefined, - P85: undefined, + P10: undefined, P75: undefined, - P90: undefined, P999: undefined, - P10: undefined, - P99: undefined, + P85: undefined, + P95: undefined, }; + if (output["P99"] !== undefined) { + contents.P99 = parseFloat(output["P99"]); + } + if (output["P90"] !== undefined) { + contents.P90 = parseFloat(output["P90"]); + } if (output["P50"] !== undefined) { contents.P50 = parseFloat(output["P50"]); } - if (output["P95"] !== undefined) { - contents.P95 = 
parseFloat(output["P95"]); - } - if (output["P85"] !== undefined) { - contents.P85 = parseFloat(output["P85"]); + if (output["P10"] !== undefined) { + contents.P10 = parseFloat(output["P10"]); } if (output["P75"] !== undefined) { contents.P75 = parseFloat(output["P75"]); } - if (output["P90"] !== undefined) { - contents.P90 = parseFloat(output["P90"]); - } if (output["P999"] !== undefined) { contents.P999 = parseFloat(output["P999"]); } - if (output["P10"] !== undefined) { - contents.P10 = parseFloat(output["P10"]); + if (output["P85"] !== undefined) { + contents.P85 = parseFloat(output["P85"]); } - if (output["P99"] !== undefined) { - contents.P99 = parseFloat(output["P99"]); + if (output["P95"] !== undefined) { + contents.P95 = parseFloat(output["P95"]); } return contents; }; @@ -6872,12 +6872,9 @@ const deserializeAws_queryListPlatformVersionsResult = ( context: __SerdeContext ): ListPlatformVersionsResult => { let contents: any = { - NextToken: undefined, PlatformSummaryList: undefined, + NextToken: undefined, }; - if (output["NextToken"] !== undefined) { - contents.NextToken = output["NextToken"]; - } if (output.PlatformSummaryList === "") { contents.PlatformSummaryList = []; } @@ -6887,6 +6884,9 @@ const deserializeAws_queryListPlatformVersionsResult = ( context ); } + if (output["NextToken"] !== undefined) { + contents.NextToken = output["NextToken"]; + } return contents; }; @@ -6906,10 +6906,16 @@ const deserializeAws_queryLoadBalancer = (output: any, context: __SerdeContext): const deserializeAws_queryLoadBalancerDescription = (output: any, context: __SerdeContext): LoadBalancerDescription => { let contents: any = { - Listeners: undefined, - LoadBalancerName: undefined, Domain: undefined, + LoadBalancerName: undefined, + Listeners: undefined, }; + if (output["Domain"] !== undefined) { + contents.Domain = output["Domain"]; + } + if (output["LoadBalancerName"] !== undefined) { + contents.LoadBalancerName = output["LoadBalancerName"]; + } if (output.Listeners === "") { contents.Listeners = []; } @@ -6919,12 +6925,6 @@ const deserializeAws_queryLoadBalancerDescription = (output: any, context: __Ser context ); } - if (output["LoadBalancerName"] !== undefined) { - contents.LoadBalancerName = output["LoadBalancerName"]; - } - if (output["Domain"] !== undefined) { - contents.Domain = output["Domain"]; - } return contents; }; @@ -6938,14 +6938,17 @@ const deserializeAws_queryLoadBalancerListenersDescription = (output: any, conte const deserializeAws_queryManagedAction = (output: any, context: __SerdeContext): ManagedAction => { let contents: any = { - WindowStartTime: undefined, + ActionId: undefined, + Status: undefined, ActionDescription: undefined, ActionType: undefined, - Status: undefined, - ActionId: undefined, + WindowStartTime: undefined, }; - if (output["WindowStartTime"] !== undefined) { - contents.WindowStartTime = new Date(output["WindowStartTime"]); + if (output["ActionId"] !== undefined) { + contents.ActionId = output["ActionId"]; + } + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; } if (output["ActionDescription"] !== undefined) { contents.ActionDescription = output["ActionDescription"]; @@ -6953,11 +6956,8 @@ const deserializeAws_queryManagedAction = (output: any, context: __SerdeContext) if (output["ActionType"] !== undefined) { contents.ActionType = output["ActionType"]; } - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; - } - if (output["ActionId"] !== undefined) { - contents.ActionId = output["ActionId"]; + if 
(output["WindowStartTime"] !== undefined) { + contents.WindowStartTime = new Date(output["WindowStartTime"]); } return contents; }; @@ -6967,39 +6967,39 @@ const deserializeAws_queryManagedActionHistoryItem = ( context: __SerdeContext ): ManagedActionHistoryItem => { let contents: any = { - Status: undefined, - ActionId: undefined, + ActionDescription: undefined, + FailureType: undefined, ActionType: undefined, - FinishedTime: undefined, FailureDescription: undefined, - FailureType: undefined, + ActionId: undefined, + Status: undefined, + FinishedTime: undefined, ExecutedTime: undefined, - ActionDescription: undefined, }; - if (output["Status"] !== undefined) { - contents.Status = output["Status"]; + if (output["ActionDescription"] !== undefined) { + contents.ActionDescription = output["ActionDescription"]; } - if (output["ActionId"] !== undefined) { - contents.ActionId = output["ActionId"]; + if (output["FailureType"] !== undefined) { + contents.FailureType = output["FailureType"]; } if (output["ActionType"] !== undefined) { contents.ActionType = output["ActionType"]; } - if (output["FinishedTime"] !== undefined) { - contents.FinishedTime = new Date(output["FinishedTime"]); - } if (output["FailureDescription"] !== undefined) { contents.FailureDescription = output["FailureDescription"]; } - if (output["FailureType"] !== undefined) { - contents.FailureType = output["FailureType"]; + if (output["ActionId"] !== undefined) { + contents.ActionId = output["ActionId"]; + } + if (output["Status"] !== undefined) { + contents.Status = output["Status"]; + } + if (output["FinishedTime"] !== undefined) { + contents.FinishedTime = new Date(output["FinishedTime"]); } if (output["ExecutedTime"] !== undefined) { contents.ExecutedTime = new Date(output["ExecutedTime"]); } - if (output["ActionDescription"] !== undefined) { - contents.ActionDescription = output["ActionDescription"]; - } return contents; }; @@ -7047,19 +7047,19 @@ const deserializeAws_queryMaxAgeRule = (output: any, context: __SerdeContext): M const deserializeAws_queryMaxCountRule = (output: any, context: __SerdeContext): MaxCountRule => { let contents: any = { - Enabled: undefined, MaxCount: undefined, DeleteSourceFromS3: undefined, + Enabled: undefined, }; - if (output["Enabled"] !== undefined) { - contents.Enabled = output["Enabled"] == "true"; - } if (output["MaxCount"] !== undefined) { contents.MaxCount = parseInt(output["MaxCount"]); } if (output["DeleteSourceFromS3"] !== undefined) { contents.DeleteSourceFromS3 = output["DeleteSourceFromS3"] == "true"; } + if (output["Enabled"] !== undefined) { + contents.Enabled = output["Enabled"] == "true"; + } return contents; }; @@ -7078,32 +7078,38 @@ const deserializeAws_queryOperationInProgressException = ( const deserializeAws_queryOptionRestrictionRegex = (output: any, context: __SerdeContext): OptionRestrictionRegex => { let contents: any = { - Label: undefined, Pattern: undefined, + Label: undefined, }; - if (output["Label"] !== undefined) { - contents.Label = output["Label"]; - } if (output["Pattern"] !== undefined) { contents.Pattern = output["Pattern"]; } + if (output["Label"] !== undefined) { + contents.Label = output["Label"]; + } return contents; }; const deserializeAws_queryPlatformBranchSummary = (output: any, context: __SerdeContext): PlatformBranchSummary => { let contents: any = { - LifecycleState: undefined, + PlatformName: undefined, + BranchOrder: undefined, BranchName: undefined, + LifecycleState: undefined, SupportedTierList: undefined, - BranchOrder: undefined, - 
PlatformName: undefined, }; - if (output["LifecycleState"] !== undefined) { - contents.LifecycleState = output["LifecycleState"]; + if (output["PlatformName"] !== undefined) { + contents.PlatformName = output["PlatformName"]; + } + if (output["BranchOrder"] !== undefined) { + contents.BranchOrder = parseInt(output["BranchOrder"]); } if (output["BranchName"] !== undefined) { contents.BranchName = output["BranchName"]; } + if (output["LifecycleState"] !== undefined) { + contents.LifecycleState = output["LifecycleState"]; + } if (output.SupportedTierList === "") { contents.SupportedTierList = []; } @@ -7113,12 +7119,6 @@ const deserializeAws_queryPlatformBranchSummary = (output: any, context: __Serde context ); } - if (output["BranchOrder"] !== undefined) { - contents.BranchOrder = parseInt(output["BranchOrder"]); - } - if (output["PlatformName"] !== undefined) { - contents.PlatformName = output["PlatformName"]; - } return contents; }; @@ -7131,43 +7131,28 @@ const deserializeAws_queryPlatformBranchSummaryList = ( const deserializeAws_queryPlatformDescription = (output: any, context: __SerdeContext): PlatformDescription => { let contents: any = { - PlatformBranchName: undefined, - PlatformStatus: undefined, - PlatformName: undefined, - Maintainer: undefined, - PlatformOwner: undefined, ProgrammingLanguages: undefined, - SupportedAddonList: undefined, - SolutionStackName: undefined, - Frameworks: undefined, + DateCreated: undefined, DateUpdated: undefined, + PlatformVersion: undefined, + SupportedAddonList: undefined, + OperatingSystemVersion: undefined, CustomAmiList: undefined, - Description: undefined, - PlatformCategory: undefined, - OperatingSystemName: undefined, + PlatformBranchName: undefined, + PlatformOwner: undefined, PlatformArn: undefined, + OperatingSystemName: undefined, PlatformBranchLifecycleState: undefined, - DateCreated: undefined, - OperatingSystemVersion: undefined, - PlatformVersion: undefined, SupportedTierList: undefined, + Frameworks: undefined, + PlatformName: undefined, + PlatformStatus: undefined, + Description: undefined, + SolutionStackName: undefined, + PlatformCategory: undefined, + Maintainer: undefined, PlatformLifecycleState: undefined, }; - if (output["PlatformBranchName"] !== undefined) { - contents.PlatformBranchName = output["PlatformBranchName"]; - } - if (output["PlatformStatus"] !== undefined) { - contents.PlatformStatus = output["PlatformStatus"]; - } - if (output["PlatformName"] !== undefined) { - contents.PlatformName = output["PlatformName"]; - } - if (output["Maintainer"] !== undefined) { - contents.Maintainer = output["Maintainer"]; - } - if (output["PlatformOwner"] !== undefined) { - contents.PlatformOwner = output["PlatformOwner"]; - } if (output.ProgrammingLanguages === "") { contents.ProgrammingLanguages = []; } @@ -7177,6 +7162,15 @@ const deserializeAws_queryPlatformDescription = (output: any, context: __SerdeCo context ); } + if (output["DateCreated"] !== undefined) { + contents.DateCreated = new Date(output["DateCreated"]); + } + if (output["DateUpdated"] !== undefined) { + contents.DateUpdated = new Date(output["DateUpdated"]); + } + if (output["PlatformVersion"] !== undefined) { + contents.PlatformVersion = output["PlatformVersion"]; + } if (output.SupportedAddonList === "") { contents.SupportedAddonList = []; } @@ -7186,9 +7180,42 @@ const deserializeAws_queryPlatformDescription = (output: any, context: __SerdeCo context ); } - if (output["SolutionStackName"] !== undefined) { - contents.SolutionStackName = output["SolutionStackName"]; 
- } + if (output["OperatingSystemVersion"] !== undefined) { + contents.OperatingSystemVersion = output["OperatingSystemVersion"]; + } + if (output.CustomAmiList === "") { + contents.CustomAmiList = []; + } + if (output["CustomAmiList"] !== undefined && output["CustomAmiList"]["member"] !== undefined) { + contents.CustomAmiList = deserializeAws_queryCustomAmiList( + __getArrayIfSingleItem(output["CustomAmiList"]["member"]), + context + ); + } + if (output["PlatformBranchName"] !== undefined) { + contents.PlatformBranchName = output["PlatformBranchName"]; + } + if (output["PlatformOwner"] !== undefined) { + contents.PlatformOwner = output["PlatformOwner"]; + } + if (output["PlatformArn"] !== undefined) { + contents.PlatformArn = output["PlatformArn"]; + } + if (output["OperatingSystemName"] !== undefined) { + contents.OperatingSystemName = output["OperatingSystemName"]; + } + if (output["PlatformBranchLifecycleState"] !== undefined) { + contents.PlatformBranchLifecycleState = output["PlatformBranchLifecycleState"]; + } + if (output.SupportedTierList === "") { + contents.SupportedTierList = []; + } + if (output["SupportedTierList"] !== undefined && output["SupportedTierList"]["member"] !== undefined) { + contents.SupportedTierList = deserializeAws_querySupportedTierList( + __getArrayIfSingleItem(output["SupportedTierList"]["member"]), + context + ); + } if (output.Frameworks === "") { contents.Frameworks = []; } @@ -7198,50 +7225,23 @@ const deserializeAws_queryPlatformDescription = (output: any, context: __SerdeCo context ); } - if (output["DateUpdated"] !== undefined) { - contents.DateUpdated = new Date(output["DateUpdated"]); - } - if (output.CustomAmiList === "") { - contents.CustomAmiList = []; + if (output["PlatformName"] !== undefined) { + contents.PlatformName = output["PlatformName"]; } - if (output["CustomAmiList"] !== undefined && output["CustomAmiList"]["member"] !== undefined) { - contents.CustomAmiList = deserializeAws_queryCustomAmiList( - __getArrayIfSingleItem(output["CustomAmiList"]["member"]), - context - ); + if (output["PlatformStatus"] !== undefined) { + contents.PlatformStatus = output["PlatformStatus"]; } if (output["Description"] !== undefined) { contents.Description = output["Description"]; } + if (output["SolutionStackName"] !== undefined) { + contents.SolutionStackName = output["SolutionStackName"]; + } if (output["PlatformCategory"] !== undefined) { contents.PlatformCategory = output["PlatformCategory"]; } - if (output["OperatingSystemName"] !== undefined) { - contents.OperatingSystemName = output["OperatingSystemName"]; - } - if (output["PlatformArn"] !== undefined) { - contents.PlatformArn = output["PlatformArn"]; - } - if (output["PlatformBranchLifecycleState"] !== undefined) { - contents.PlatformBranchLifecycleState = output["PlatformBranchLifecycleState"]; - } - if (output["DateCreated"] !== undefined) { - contents.DateCreated = new Date(output["DateCreated"]); - } - if (output["OperatingSystemVersion"] !== undefined) { - contents.OperatingSystemVersion = output["OperatingSystemVersion"]; - } - if (output["PlatformVersion"] !== undefined) { - contents.PlatformVersion = output["PlatformVersion"]; - } - if (output.SupportedTierList === "") { - contents.SupportedTierList = []; - } - if (output["SupportedTierList"] !== undefined && output["SupportedTierList"]["member"] !== undefined) { - contents.SupportedTierList = deserializeAws_querySupportedTierList( - __getArrayIfSingleItem(output["SupportedTierList"]["member"]), - context - ); + if (output["Maintainer"] !== 
undefined) { + contents.Maintainer = output["Maintainer"]; } if (output["PlatformLifecycleState"] !== undefined) { contents.PlatformLifecycleState = output["PlatformLifecycleState"]; @@ -7272,15 +7272,15 @@ const deserializeAws_queryPlatformProgrammingLanguage = ( context: __SerdeContext ): PlatformProgrammingLanguage => { let contents: any = { - Version: undefined, Name: undefined, + Version: undefined, }; - if (output["Version"] !== undefined) { - contents.Version = output["Version"]; - } if (output["Name"] !== undefined) { contents.Name = output["Name"]; } + if (output["Version"] !== undefined) { + contents.Version = output["Version"]; + } return contents; }; @@ -7293,42 +7293,48 @@ const deserializeAws_queryPlatformProgrammingLanguages = ( const deserializeAws_queryPlatformSummary = (output: any, context: __SerdeContext): PlatformSummary => { let contents: any = { - PlatformBranchLifecycleState: undefined, - PlatformCategory: undefined, + PlatformOwner: undefined, + PlatformBranchName: undefined, PlatformArn: undefined, - OperatingSystemName: undefined, PlatformStatus: undefined, - PlatformOwner: undefined, PlatformVersion: undefined, - PlatformBranchName: undefined, - PlatformLifecycleState: undefined, - SupportedTierList: undefined, SupportedAddonList: undefined, + PlatformBranchLifecycleState: undefined, OperatingSystemVersion: undefined, + PlatformLifecycleState: undefined, + SupportedTierList: undefined, + OperatingSystemName: undefined, + PlatformCategory: undefined, }; - if (output["PlatformBranchLifecycleState"] !== undefined) { - contents.PlatformBranchLifecycleState = output["PlatformBranchLifecycleState"]; + if (output["PlatformOwner"] !== undefined) { + contents.PlatformOwner = output["PlatformOwner"]; } - if (output["PlatformCategory"] !== undefined) { - contents.PlatformCategory = output["PlatformCategory"]; + if (output["PlatformBranchName"] !== undefined) { + contents.PlatformBranchName = output["PlatformBranchName"]; } if (output["PlatformArn"] !== undefined) { contents.PlatformArn = output["PlatformArn"]; } - if (output["OperatingSystemName"] !== undefined) { - contents.OperatingSystemName = output["OperatingSystemName"]; - } if (output["PlatformStatus"] !== undefined) { contents.PlatformStatus = output["PlatformStatus"]; } - if (output["PlatformOwner"] !== undefined) { - contents.PlatformOwner = output["PlatformOwner"]; - } if (output["PlatformVersion"] !== undefined) { contents.PlatformVersion = output["PlatformVersion"]; } - if (output["PlatformBranchName"] !== undefined) { - contents.PlatformBranchName = output["PlatformBranchName"]; + if (output.SupportedAddonList === "") { + contents.SupportedAddonList = []; + } + if (output["SupportedAddonList"] !== undefined && output["SupportedAddonList"]["member"] !== undefined) { + contents.SupportedAddonList = deserializeAws_querySupportedAddonList( + __getArrayIfSingleItem(output["SupportedAddonList"]["member"]), + context + ); + } + if (output["PlatformBranchLifecycleState"] !== undefined) { + contents.PlatformBranchLifecycleState = output["PlatformBranchLifecycleState"]; + } + if (output["OperatingSystemVersion"] !== undefined) { + contents.OperatingSystemVersion = output["OperatingSystemVersion"]; } if (output["PlatformLifecycleState"] !== undefined) { contents.PlatformLifecycleState = output["PlatformLifecycleState"]; @@ -7342,17 +7348,11 @@ const deserializeAws_queryPlatformSummary = (output: any, context: __SerdeContex context ); } - if (output.SupportedAddonList === "") { - contents.SupportedAddonList = []; - } - 
if (output["SupportedAddonList"] !== undefined && output["SupportedAddonList"]["member"] !== undefined) { - contents.SupportedAddonList = deserializeAws_querySupportedAddonList( - __getArrayIfSingleItem(output["SupportedAddonList"]["member"]), - context - ); + if (output["OperatingSystemName"] !== undefined) { + contents.OperatingSystemName = output["OperatingSystemName"]; } - if (output["OperatingSystemVersion"] !== undefined) { - contents.OperatingSystemVersion = output["OperatingSystemVersion"]; + if (output["PlatformCategory"] !== undefined) { + contents.PlatformCategory = output["PlatformCategory"]; } return contents; }; @@ -7376,15 +7376,15 @@ const deserializeAws_queryPlatformVersionStillReferencedException = ( const deserializeAws_queryQueue = (output: any, context: __SerdeContext): Queue => { let contents: any = { - Name: undefined, URL: undefined, + Name: undefined, }; - if (output["Name"] !== undefined) { - contents.Name = output["Name"]; - } if (output["URL"] !== undefined) { contents.URL = output["URL"]; } + if (output["Name"] !== undefined) { + contents.Name = output["Name"]; + } return contents; }; @@ -7417,29 +7417,29 @@ const deserializeAws_queryResourceQuota = (output: any, context: __SerdeContext) const deserializeAws_queryResourceQuotas = (output: any, context: __SerdeContext): ResourceQuotas => { let contents: any = { - ConfigurationTemplateQuota: undefined, - ApplicationQuota: undefined, - CustomPlatformQuota: undefined, ApplicationVersionQuota: undefined, + ApplicationQuota: undefined, EnvironmentQuota: undefined, + CustomPlatformQuota: undefined, + ConfigurationTemplateQuota: undefined, }; - if (output["ConfigurationTemplateQuota"] !== undefined) { - contents.ConfigurationTemplateQuota = deserializeAws_queryResourceQuota( - output["ConfigurationTemplateQuota"], - context - ); + if (output["ApplicationVersionQuota"] !== undefined) { + contents.ApplicationVersionQuota = deserializeAws_queryResourceQuota(output["ApplicationVersionQuota"], context); } if (output["ApplicationQuota"] !== undefined) { contents.ApplicationQuota = deserializeAws_queryResourceQuota(output["ApplicationQuota"], context); } + if (output["EnvironmentQuota"] !== undefined) { + contents.EnvironmentQuota = deserializeAws_queryResourceQuota(output["EnvironmentQuota"], context); + } if (output["CustomPlatformQuota"] !== undefined) { contents.CustomPlatformQuota = deserializeAws_queryResourceQuota(output["CustomPlatformQuota"], context); } - if (output["ApplicationVersionQuota"] !== undefined) { - contents.ApplicationVersionQuota = deserializeAws_queryResourceQuota(output["ApplicationVersionQuota"], context); - } - if (output["EnvironmentQuota"] !== undefined) { - contents.EnvironmentQuota = deserializeAws_queryResourceQuota(output["EnvironmentQuota"], context); + if (output["ConfigurationTemplateQuota"] !== undefined) { + contents.ConfigurationTemplateQuota = deserializeAws_queryResourceQuota( + output["ConfigurationTemplateQuota"], + context + ); } return contents; }; @@ -7449,12 +7449,9 @@ const deserializeAws_queryResourceTagsDescriptionMessage = ( context: __SerdeContext ): ResourceTagsDescriptionMessage => { let contents: any = { - ResourceArn: undefined, ResourceTags: undefined, + ResourceArn: undefined, }; - if (output["ResourceArn"] !== undefined) { - contents.ResourceArn = output["ResourceArn"]; - } if (output.ResourceTags === "") { contents.ResourceTags = []; } @@ -7464,6 +7461,9 @@ const deserializeAws_queryResourceTagsDescriptionMessage = ( context ); } + if (output["ResourceArn"] !== 
undefined) { + contents.ResourceArn = output["ResourceArn"]; + } return contents; }; @@ -7501,15 +7501,15 @@ const deserializeAws_queryRetrieveEnvironmentInfoResultMessage = ( const deserializeAws_queryS3Location = (output: any, context: __SerdeContext): S3Location => { let contents: any = { - S3Bucket: undefined, S3Key: undefined, + S3Bucket: undefined, }; - if (output["S3Bucket"] !== undefined) { - contents.S3Bucket = output["S3Bucket"]; - } if (output["S3Key"] !== undefined) { contents.S3Key = output["S3Key"]; } + if (output["S3Bucket"] !== undefined) { + contents.S3Bucket = output["S3Bucket"]; + } return contents; }; @@ -7541,34 +7541,40 @@ const deserializeAws_queryS3SubscriptionRequiredException = ( const deserializeAws_querySingleInstanceHealth = (output: any, context: __SerdeContext): SingleInstanceHealth => { let contents: any = { - InstanceId: undefined, - InstanceType: undefined, - Deployment: undefined, + ApplicationMetrics: undefined, System: undefined, + Causes: undefined, + InstanceId: undefined, LaunchedAt: undefined, - Color: undefined, + Deployment: undefined, + InstanceType: undefined, HealthStatus: undefined, AvailabilityZone: undefined, - ApplicationMetrics: undefined, - Causes: undefined, + Color: undefined, }; - if (output["InstanceId"] !== undefined) { - contents.InstanceId = output["InstanceId"]; - } - if (output["InstanceType"] !== undefined) { - contents.InstanceType = output["InstanceType"]; - } - if (output["Deployment"] !== undefined) { - contents.Deployment = deserializeAws_queryDeployment(output["Deployment"], context); + if (output["ApplicationMetrics"] !== undefined) { + contents.ApplicationMetrics = deserializeAws_queryApplicationMetrics(output["ApplicationMetrics"], context); } if (output["System"] !== undefined) { contents.System = deserializeAws_querySystemStatus(output["System"], context); } + if (output.Causes === "") { + contents.Causes = []; + } + if (output["Causes"] !== undefined && output["Causes"]["member"] !== undefined) { + contents.Causes = deserializeAws_queryCauses(__getArrayIfSingleItem(output["Causes"]["member"]), context); + } + if (output["InstanceId"] !== undefined) { + contents.InstanceId = output["InstanceId"]; + } if (output["LaunchedAt"] !== undefined) { contents.LaunchedAt = new Date(output["LaunchedAt"]); } - if (output["Color"] !== undefined) { - contents.Color = output["Color"]; + if (output["Deployment"] !== undefined) { + contents.Deployment = deserializeAws_queryDeployment(output["Deployment"], context); + } + if (output["InstanceType"] !== undefined) { + contents.InstanceType = output["InstanceType"]; } if (output["HealthStatus"] !== undefined) { contents.HealthStatus = output["HealthStatus"]; @@ -7576,14 +7582,8 @@ const deserializeAws_querySingleInstanceHealth = (output: any, context: __SerdeC if (output["AvailabilityZone"] !== undefined) { contents.AvailabilityZone = output["AvailabilityZone"]; } - if (output["ApplicationMetrics"] !== undefined) { - contents.ApplicationMetrics = deserializeAws_queryApplicationMetrics(output["ApplicationMetrics"], context); - } - if (output.Causes === "") { - contents.Causes = []; - } - if (output["Causes"] !== undefined && output["Causes"]["member"] !== undefined) { - contents.Causes = deserializeAws_queryCauses(__getArrayIfSingleItem(output["Causes"]["member"]), context); + if (output["Color"] !== undefined) { + contents.Color = output["Color"]; } return contents; }; @@ -7593,12 +7593,9 @@ const deserializeAws_querySolutionStackDescription = ( context: __SerdeContext ): 
SolutionStackDescription => { let contents: any = { - SolutionStackName: undefined, PermittedFileTypes: undefined, + SolutionStackName: undefined, }; - if (output["SolutionStackName"] !== undefined) { - contents.SolutionStackName = output["SolutionStackName"]; - } if (output.PermittedFileTypes === "") { contents.PermittedFileTypes = []; } @@ -7608,6 +7605,9 @@ const deserializeAws_querySolutionStackDescription = ( context ); } + if (output["SolutionStackName"] !== undefined) { + contents.SolutionStackName = output["SolutionStackName"]; + } return contents; }; @@ -7617,19 +7617,19 @@ const deserializeAws_querySolutionStackFileTypeList = (output: any, context: __S const deserializeAws_querySourceBuildInformation = (output: any, context: __SerdeContext): SourceBuildInformation => { let contents: any = { + SourceType: undefined, SourceLocation: undefined, SourceRepository: undefined, - SourceType: undefined, }; + if (output["SourceType"] !== undefined) { + contents.SourceType = output["SourceType"]; + } if (output["SourceLocation"] !== undefined) { contents.SourceLocation = output["SourceLocation"]; } if (output["SourceRepository"] !== undefined) { contents.SourceRepository = output["SourceRepository"]; } - if (output["SourceType"] !== undefined) { - contents.SourceType = output["SourceType"]; - } return contents; }; @@ -7648,14 +7648,11 @@ const deserializeAws_querySourceBundleDeletionException = ( const deserializeAws_queryStatusCodes = (output: any, context: __SerdeContext): StatusCodes => { let contents: any = { - Status3xx: undefined, Status4xx: undefined, Status5xx: undefined, Status2xx: undefined, + Status3xx: undefined, }; - if (output["Status3xx"] !== undefined) { - contents.Status3xx = parseInt(output["Status3xx"]); - } if (output["Status4xx"] !== undefined) { contents.Status4xx = parseInt(output["Status4xx"]); } @@ -7665,6 +7662,9 @@ const deserializeAws_queryStatusCodes = (output: any, context: __SerdeContext): if (output["Status2xx"] !== undefined) { contents.Status2xx = parseInt(output["Status2xx"]); } + if (output["Status3xx"] !== undefined) { + contents.Status3xx = parseInt(output["Status3xx"]); + } return contents; }; @@ -7698,15 +7698,15 @@ const deserializeAws_querySystemStatus = (output: any, context: __SerdeContext): const deserializeAws_queryTag = (output: any, context: __SerdeContext): Tag => { let contents: any = { - Value: undefined, Key: undefined, + Value: undefined, }; - if (output["Value"] !== undefined) { - contents.Value = output["Value"]; - } if (output["Key"] !== undefined) { contents.Key = output["Key"]; } + if (output["Value"] !== undefined) { + contents.Value = output["Value"]; + } return contents; }; @@ -7816,21 +7816,21 @@ const deserializeAws_queryTriggerList = (output: any, context: __SerdeContext): const deserializeAws_queryValidationMessage = (output: any, context: __SerdeContext): ValidationMessage => { let contents: any = { Message: undefined, - Severity: undefined, - OptionName: undefined, Namespace: undefined, + OptionName: undefined, + Severity: undefined, }; if (output["Message"] !== undefined) { contents.Message = output["Message"]; } - if (output["Severity"] !== undefined) { - contents.Severity = output["Severity"]; + if (output["Namespace"] !== undefined) { + contents.Namespace = output["Namespace"]; } if (output["OptionName"] !== undefined) { contents.OptionName = output["OptionName"]; } - if (output["Namespace"] !== undefined) { - contents.Namespace = output["Namespace"]; + if (output["Severity"] !== undefined) { + contents.Severity = 
output["Severity"]; } return contents; }; diff --git a/clients/client-elasticache/models/models_0.ts b/clients/client-elasticache/models/models_0.ts index c3e127a1f101..57a2612dc043 100644 --- a/clients/client-elasticache/models/models_0.ts +++ b/clients/client-elasticache/models/models_0.ts @@ -7160,7 +7160,7 @@ export interface DescribeReservedCacheNodesOfferingsMessage { /** *The offering type filter value. * Use this parameter to show only the available offerings matching the specified offering type.
- *Valid Values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization"
+ *
Valid Values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" |"All Upfront"|"Partial Upfront"| "No Upfront"
*
Amazon EMR is a web service that makes it easy to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS products to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehousing.
+ *Amazon EMR is a web service that makes it easier to process large amounts of data + * efficiently. Amazon EMR uses Hadoop processing combined with several AWS services to do + * tasks such as web indexing, data mining, log file analysis, machine learning, scientific + * simulation, and data warehouse management.
*/ export class EMR extends EMRClient { /** *Adds an instance fleet to a running cluster.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x.
*AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed in each job flow.
- *If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using SSH to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.
- *A step specifies the location of a JAR file stored either on the master node of the cluster or in Amazon S3. Each step is performed by the main function of the main class of the JAR file. The main class can be specified either in the manifest of the JAR or by using the MainFunction parameter of the step.
- *Amazon EMR executes each step in the order listed. For a step to be considered complete, the main function must exit with a zero exit code and all Hadoop jobs started while the step was running must have completed and run successfully.
- *You can only add steps to a cluster that is in one of the following states: STARTING, BOOTSTRAPPING, RUNNING, or WAITING.
+ *AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed + * in each job flow.
+ *If your cluster is long-running (such as a Hive data warehouse) or complex, you may + * require more than 256 steps to process your data. You can bypass the 256-step limitation in + * various ways, including using SSH to connect to the master node and submitting queries + * directly to the software running on the master node, such as Hive and Hadoop. For more + * information on how to do this, see Add More than 256 Steps to a + * Cluster in the Amazon EMR Management Guide.
+ *A step specifies the location of a JAR file stored either on the master node of the + * cluster or in Amazon S3. Each step is performed by the main function of the main class of + * the JAR file. The main class can be specified either in the manifest of the JAR or by using + * the MainFunction parameter of the step.
+ *Amazon EMR executes each step in the order listed. For a step to be considered complete, + * the main function must exit with a zero exit code and all Hadoop jobs started while the + * step was running must have completed and run successfully.
+ *You can only add steps to a cluster that is in one of the following states: STARTING, + * BOOTSTRAPPING, RUNNING, or WAITING.
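A minimal usage sketch of submitting a step through this operation with the v3 client; the Region, cluster ID, and S3 path are placeholders:

```ts
import { EMRClient, AddJobFlowStepsCommand } from "@aws-sdk/client-emr";

const emr = new EMRClient({ region: "us-east-1" }); // placeholder Region

export async function addSparkStep(clusterId: string) {
  // The cluster must be in STARTING, BOOTSTRAPPING, RUNNING, or WAITING.
  const { StepIds } = await emr.send(
    new AddJobFlowStepsCommand({
      JobFlowId: clusterId, // e.g. "j-XXXXXXXXXXXXX"
      Steps: [
        {
          Name: "spark-job",
          ActionOnFailure: "CONTINUE",
          HadoopJarStep: {
            Jar: "command-runner.jar",
            Args: ["spark-submit", "s3://my-bucket/jobs/etl.py"], // placeholder script
          },
        },
      ],
    })
  );
  return StepIds ?? [];
}
```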
*/ public addJobFlowSteps( args: AddJobFlowStepsCommandInput, @@ -274,9 +331,9 @@ export class EMR extends EMRClient { } /** - *Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. - * For more information, see Tag Clusters. - *
+ *Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in + * various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. + * For more information, see Tag Clusters.
*/ public addTags(args: AddTagsCommandInput, options?: __HttpHandlerOptions): PromiseCancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a PENDING
state.
Cancels a pending step or steps in a running cluster. Available only in Amazon EMR
+ * versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in
+ * each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee
+ * that a step will be canceled, even if the request is successfully submitted. You can only
+ * cancel steps that are in a PENDING
state.
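A minimal sketch of canceling a pending step; the IDs are placeholders, and the response should be inspected because cancellation is asynchronous:

```ts
import { EMRClient, CancelStepsCommand } from "@aws-sdk/client-emr";

const emr = new EMRClient({ region: "us-east-1" }); // placeholder Region

export async function cancelPendingStep(clusterId: string, stepId: string) {
  const { CancelStepsInfoList } = await emr.send(
    new CancelStepsCommand({
      ClusterId: clusterId, // e.g. "j-XXXXXXXXXXXXX"
      StepIds: [stepId], // only PENDING steps can be canceled
    })
  );
  // Each entry reports whether the cancellation request was accepted.
  return CancelStepsInfoList ?? [];
}
```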
Creates a security configuration, which is stored in the service and can be specified when a cluster is created.
+ *Creates a security configuration, which is stored in the service and can be specified + * when a cluster is created.
*/ public createSecurityConfiguration( args: CreateSecurityConfigurationCommandInput, @@ -359,6 +421,76 @@ export class EMR extends EMRClient { } } + /** + *The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Creates a new Amazon EMR Studio.
+ */ + public createStudio( + args: CreateStudioCommandInput, + options?: __HttpHandlerOptions + ): PromiseThe Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Maps a user or group to the Amazon EMR Studio specified by StudioId
, and
+ * applies a session policy to refine Studio permissions for that user or group.
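A sketch of mapping an identity to a Studio; the parameter names follow my reading of CreateStudioSessionMappingInput, and the identity, Studio ID, and policy ARN are placeholders:

```ts
import { EMRClient, CreateStudioSessionMappingCommand } from "@aws-sdk/client-emr";

const emr = new EMRClient({ region: "us-east-1" }); // placeholder Region

export async function mapUserToStudio(studioId: string) {
  await emr.send(
    new CreateStudioSessionMappingCommand({
      StudioId: studioId, // e.g. "es-XXXXXXXXXXXXX"
      IdentityName: "data-scientist@example.com", // placeholder identity
      IdentityType: "USER",
      SessionPolicyArn: "arn:aws:iam::111122223333:policy/EMRStudioSessionPolicy",
    })
  );
}
```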
Deletes a security configuration.
*/ @@ -392,7 +524,77 @@ export class EMR extends EMRClient { } /** - *Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on.
+ *The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Removes an Amazon EMR Studio from the Studio metadata store.
+ */ + public deleteStudio( + args: DeleteStudioCommandInput, + options?: __HttpHandlerOptions + ): PromiseThe Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Removes a user or group from an Amazon EMR Studio.
+ */ + public deleteStudioSessionMapping( + args: DeleteStudioSessionMappingCommandInput, + options?: __HttpHandlerOptions + ): PromiseProvides cluster-level details including status, hardware and software configuration, + * VPC settings, and so on.
*/ public describeCluster( args: DescribeClusterCommandInput, @@ -424,19 +626,23 @@ export class EMR extends EMRClient { } /** - *This API is deprecated and will eventually be removed. We recommend you use ListClusters, - * DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions - * instead.
- *DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.
- *Regardless of supplied parameters, only job flows created within the last two months are returned.
- *If no parameters are supplied, then job flows matching either of the following criteria are returned:
+ *This API is no longer supported and will eventually be removed. We recommend you use + * ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.
+ *DescribeJobFlows returns a list of job flows that match all of the supplied parameters. + * The parameters can include a list of job flow IDs, job flow states, and restrictions on job + * flow creation date and time.
+ *Regardless of supplied parameters, only job flows created within the last two months are + * returned.
+ *If no parameters are supplied, then job flows matching either of the following criteria + * are returned:
*Job flows created and completed in the last two weeks
* Job flows created within the last two months that are in one of the following states: RUNNING
, WAITING
, SHUTTING_DOWN
,
- * STARTING
+ *
Job flows created within the last two months that are in one of the following
+ * states: RUNNING
, WAITING
, SHUTTING_DOWN
,
+ * STARTING
*
Provides the details of a security configuration by returning the configuration JSON.
+ *Provides the details of a security configuration by returning the configuration + * JSON.
*/ public describeSecurityConfiguration( args: DescribeSecurityConfigurationCommandInput, @@ -565,7 +772,47 @@ export class EMR extends EMRClient { } /** - *Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
+ *The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio + * access URL, and so on.
+ */ + public describeStudio( + args: DescribeStudioCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns the Amazon EMR block public access configuration for your AWS account in the + * current Region. For more information see Configure Block + * Public Access for Amazon EMR in the Amazon EMR Management + * Guide.
*/ public getBlockPublicAccessConfiguration( args: GetBlockPublicAccessConfigurationCommandInput, @@ -597,9 +844,7 @@ export class EMR extends EMRClient { } /** - *- * Fetches the attached managed scaling policy for an Amazon EMR cluster. - *
+ *Fetches the attached managed scaling policy for an Amazon EMR cluster.
*/ public getManagedScalingPolicy( args: GetManagedScalingPolicyCommandInput, @@ -630,6 +875,43 @@ export class EMR extends EMRClient { } } + /** + *The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Fetches mapping details for the specified Amazon EMR Studio and identity (user or + * group).
+ */ + public getStudioSessionMapping( + args: GetStudioSessionMappingCommandInput, + options?: __HttpHandlerOptions + ): PromiseProvides information about the bootstrap actions associated with a cluster.
*/ @@ -663,7 +945,11 @@ export class EMR extends EMRClient { } /** - *Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.
+ *Provides the status of all clusters visible to this AWS account. Allows you to filter + * the list of clusters based on certain criteria; for example, filtering by cluster creation + * date and time or by status. This call returns a maximum of 50 clusters per call, but + * returns a marker to track the paging of the cluster list across multiple ListClusters + * calls.
*/ public listClusters( args: ListClustersCommandInput, @@ -694,7 +980,8 @@ export class EMR extends EMRClient { /** *Lists all available details about the instance fleets in a cluster.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
+ *Provides information for all active EC2 instances and EC2 instances terminated in the + * last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are + * considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
*/ public listInstances( args: ListInstancesCommandInput, @@ -791,7 +1080,10 @@ export class EMR extends EMRClient { } /** - *Provides summaries of all notebook executions. You can filter the list based on multiple criteria such as status, time range, and editor id. Returns a maximum of 50 notebook executions and a marker to track the paging of a longer notebook execution list across multiple ListNotebookExecution
calls.
Provides summaries of all notebook executions. You can filter the list based on multiple
+ * criteria such as status, time range, and editor id. Returns a maximum of 50 notebook
+ * executions and a marker to track the paging of a longer notebook execution list across
+ * multiple ListNotebookExecution
calls.
Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.
+ *Lists all the security configurations visible to this account, providing their creation + * dates and times, and their names. This call returns a maximum of 50 clusters per call, but + * returns a marker to track the paging of the cluster list across multiple + * ListSecurityConfigurations calls.
*/ public listSecurityConfigurations( args: ListSecurityConfigurationsCommandInput, @@ -855,7 +1150,9 @@ export class EMR extends EMRClient { } /** - *Provides a list of steps for the cluster in reverse order unless you specify stepIds
with the request of filter by StepStates
. You can specify a maximum of ten stepIDs
.
Provides a list of steps for the cluster in reverse order unless you specify
+ * stepIds
with the request of filter by StepStates
. You can
+ * specify a maximum of ten stepIDs
.
Modifies the number of steps that can be executed concurrently for the cluster specified using ClusterID.
+ *The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Returns a list of all Amazon EMR Studios associated with the AWS account. The list + * includes details such as ID, Studio Access URL, and creation time for each Studio.
+ */ + public listStudios(args: ListStudiosCommandInput, options?: __HttpHandlerOptions): PromiseThe Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Returns a list of all user or group session mappings for the EMR Studio specified by
+ * StudioId
.
Modifies the number of steps that can be executed concurrently for the cluster specified + * using ClusterID.
*/ public modifyCluster( args: ModifyClusterCommandInput, @@ -913,9 +1279,12 @@ export class EMR extends EMRClient { } /** - *Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.
+ *Modifies the target On-Demand and target Spot capacities for the instance fleet with the + * specified InstanceFleetID within the cluster specified using ClusterID. The call either + * succeeds or fails atomically.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.
+ *ModifyInstanceGroups modifies the number of nodes and configuration settings of an + * instance group. The input parameters include the new target instance count for the group + * and the instance group ID. The call will either succeed or fail atomically.
*/ public modifyInstanceGroups( args: ModifyInstanceGroupsCommandInput, @@ -980,7 +1351,10 @@ export class EMR extends EMRClient { } /** - *Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric.
+ *Creates or updates an automatic scaling policy for a core instance group or task + * instance group in an Amazon EMR cluster. The automatic scaling policy defines how an + * instance group dynamically adds and terminates EC2 instances in response to the value of a + * CloudWatch metric.
*/ public putAutoScalingPolicy( args: PutAutoScalingPolicyCommandInput, @@ -1012,7 +1386,10 @@ export class EMR extends EMRClient { } /** - *Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
+ *Creates or updates an Amazon EMR block public access configuration for your AWS account + * in the current Region. For more information see Configure Block + * Public Access for Amazon EMR in the Amazon EMR Management + * Guide.
*/ public putBlockPublicAccessConfiguration( args: PutBlockPublicAccessConfigurationCommandInput, @@ -1044,9 +1421,10 @@ export class EMR extends EMRClient { } /** - *- * Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration. - *
+ *Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed + * scaling policy defines the limits for resources, such as EC2 instances that can be added or + * terminated from a cluster. The policy only applies to the core and task nodes. The master + * node cannot be scaled after initial configuration.
*/ public putManagedScalingPolicy( args: PutManagedScalingPolicyCommandInput, @@ -1078,7 +1456,8 @@ export class EMR extends EMRClient { } /** - *Removes an automatic scaling policy from a specified instance group within an EMR cluster.
+ *Removes an automatic scaling policy from a specified instance group within an EMR + * cluster.
*/ public removeAutoScalingPolicy( args: RemoveAutoScalingPolicyCommandInput, @@ -1110,9 +1489,7 @@ export class EMR extends EMRClient { } /** - *- * Removes a managed scaling policy from a specified EMR cluster. - *
+ *Removes a managed scaling policy from a specified EMR cluster.
*/ public removeManagedScalingPolicy( args: RemoveManagedScalingPolicyCommandInput, @@ -1144,9 +1521,9 @@ export class EMR extends EMRClient { } /** - *Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. - * For more information, see Tag Clusters. - *
+ *Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in + * various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. + * For more information, see Tag Clusters.
*The following example removes the stack tag with value Prod from a cluster:
*/ public removeTags(args: RemoveTagsCommandInput, options?: __HttpHandlerOptions): PromiseRunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps - * specified. After the steps complete, the cluster stops and the HDFS partition is + *
RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the
+ * steps specified. After the steps complete, the cluster stops and the HDFS partition is
* lost. To prevent loss of data, configure the last step of the job flow to store results in
* Amazon S3. If the JobFlowInstancesConfig
- * KeepJobFlowAliveWhenNoSteps
parameter is
- * set to TRUE
, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.
For additional protection, you can set the
- * JobFlowInstancesConfig
- * TerminationProtected
parameter to TRUE
to lock the
- * cluster and prevent it from being
- * terminated by API call, user intervention, or in the event of a job flow error.
KeepJobFlowAliveWhenNoSteps
parameter is set to TRUE
, the cluster
+ * transitions to the WAITING state rather than shutting down after the steps have completed.
+ * For additional protection, you can set the JobFlowInstancesConfig
+ * TerminationProtected
parameter to TRUE
to lock the cluster and
+ * prevent it from being terminated by API call, user intervention, or in the event of a job
+ * flow error.
A maximum of 256 steps are allowed in each job flow.
- *If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.
+ *If your cluster is long-running (such as a Hive data warehouse) or complex, you may + * require more than 256 steps to process your data. You can bypass the 256-step limitation in + * various ways, including using the SSH shell to connect to the master node and submitting + * queries directly to the software running on the master node, such as Hive and Hadoop. For + * more information on how to do this, see Add More than 256 Steps to a + * Cluster in the Amazon EMR Management Guide.
*For long running clusters, we recommend that you periodically store your results.
*The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.
+ *The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets + * parameters or InstanceGroups parameters, but not both.
*SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection
on a cluster is similar to calling the Amazon EC2 DisableAPITermination
API on all EC2 instances in a cluster.
SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster
+ * cannot be terminated by user intervention, an API call, or in the event of a job-flow
+ * error. The cluster still terminates upon successful completion of the job flow. Calling
+ * SetTerminationProtection
on a cluster is similar to calling the Amazon EC2
+ * DisableAPITermination
API on all EC2 instances in a cluster.
- * SetTerminationProtection
is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.
To terminate a cluster that has been locked by setting SetTerminationProtection
to true
,
- * you must first unlock the job flow by a subsequent call to SetTerminationProtection
- * in which you set the value to false
.
For more information, seeManaging Cluster Termination in the - * Amazon EMR Management Guide. - *
+ *SetTerminationProtection
is used to prevent accidental termination of a
+ * cluster and to ensure that in the event of an error, the instances persist so that you can
+ * recover any data stored in their ephemeral instance storage.
+ * To terminate a cluster that has been locked by setting
+ * SetTerminationProtection
to true
, you must first unlock the
+ * job flow by a subsequent call to SetTerminationProtection
in which you set the
+ * value to false
.
For more information, see Managing Cluster + * Termination in the <i>Amazon EMR Management Guide</i>. </p>
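A short sketch of locking and later unlocking a cluster with this operation; the cluster ID is a placeholder:

```ts
import { EMRClient, SetTerminationProtectionCommand } from "@aws-sdk/client-emr";

const emr = new EMRClient({ region: "us-east-1" }); // placeholder Region

export async function setTerminationProtection(clusterId: string, locked: boolean) {
  // Call again with locked = false before you need to terminate the cluster.
  await emr.send(
    new SetTerminationProtectionCommand({
      JobFlowIds: [clusterId], // e.g. "j-XXXXXXXXXXXXX"
      TerminationProtected: locked,
    })
  );
}
```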
*/ public setTerminationProtection( args: SetTerminationProtectionCommandInput, @@ -1258,7 +1644,15 @@ export class EMR extends EMRClient { } /** - *Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster is visible to all IAM users of the AWS account associated with the cluster. Only the IAM user who created the cluster or the AWS account root user can call this action. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If set to false
, only the IAM user that created the cluster can perform actions. This action works on running clusters. You can override the default true
setting when you create a cluster by using the VisibleToAllUsers
parameter with RunJobFlow
.
Sets the Cluster$VisibleToAllUsers value, which determines whether the
+ * cluster is visible to all IAM users of the AWS account associated with the cluster. Only
+ * the IAM user who created the cluster or the AWS account root user can call this action. The
+ * default value, true
, indicates that all IAM users in the AWS account can
+ * perform cluster actions if they have the proper IAM policy permissions. If set to
+ * false
, only the IAM user that created the cluster can perform actions. This
+ * action works on running clusters. You can override the default true
setting
+ * when you create a cluster by using the VisibleToAllUsers
parameter with
+ * RunJobFlow
.
TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.
- *The maximum number of clusters allowed is 10. The call to TerminateJobFlows
is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.
TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut + * down, any step not yet completed is canceled and the EC2 instances on which the cluster is + * running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri + * was specified when the cluster was created.
+ *The maximum number of clusters allowed is 10. The call to TerminateJobFlows
+ * is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5
+ * minutes for the cluster to completely terminate and release allocated resources, such as
+ * Amazon EC2 instances.
The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Updates the session policy attached to the user or group for the specified Amazon EMR + * Studio.
+ */ + public updateStudioSessionMapping( + args: UpdateStudioSessionMappingCommandInput, + options?: __HttpHandlerOptions + ): PromiseAmazon EMR is a web service that makes it easy to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS products to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehousing.
+ *Amazon EMR is a web service that makes it easier to process large amounts of data + * efficiently. Amazon EMR uses Hadoop processing combined with several AWS services to do + * tasks such as web indexing, data mining, log file analysis, machine learning, scientific + * simulation, and data warehouse management.
*/ export class EMRClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-emr/commands/AddInstanceFleetCommand.ts b/clients/client-emr/commands/AddInstanceFleetCommand.ts index f1a0b71a0dd0..0dba294faec2 100644 --- a/clients/client-emr/commands/AddInstanceFleetCommand.ts +++ b/clients/client-emr/commands/AddInstanceFleetCommand.ts @@ -23,7 +23,8 @@ export type AddInstanceFleetCommandOutput = AddInstanceFleetOutput & __MetadataB /** *Adds an instance fleet to a running cluster.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x.
*AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed in each job flow.
- *If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using SSH to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.
- *A step specifies the location of a JAR file stored either on the master node of the cluster or in Amazon S3. Each step is performed by the main function of the main class of the JAR file. The main class can be specified either in the manifest of the JAR or by using the MainFunction parameter of the step.
- *Amazon EMR executes each step in the order listed. For a step to be considered complete, the main function must exit with a zero exit code and all Hadoop jobs started while the step was running must have completed and run successfully.
- *You can only add steps to a cluster that is in one of the following states: STARTING, BOOTSTRAPPING, RUNNING, or WAITING.
+ *AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed + * in each job flow.
+ *If your cluster is long-running (such as a Hive data warehouse) or complex, you may + * require more than 256 steps to process your data. You can bypass the 256-step limitation in + * various ways, including using SSH to connect to the master node and submitting queries + * directly to the software running on the master node, such as Hive and Hadoop. For more + * information on how to do this, see Add More than 256 Steps to a + * Cluster in the Amazon EMR Management Guide.
+ *A step specifies the location of a JAR file stored either on the master node of the + * cluster or in Amazon S3. Each step is performed by the main function of the main class of + * the JAR file. The main class can be specified either in the manifest of the JAR or by using + * the MainFunction parameter of the step.
+ *Amazon EMR executes each step in the order listed. For a step to be considered complete, + * the main function must exit with a zero exit code and all Hadoop jobs started while the + * step was running must have completed and run successfully.
+ *You can only add steps to a cluster that is in one of the following states: STARTING, + * BOOTSTRAPPING, RUNNING, or WAITING.
*/ export class AddJobFlowStepsCommand extends $Command< AddJobFlowStepsCommandInput, diff --git a/clients/client-emr/commands/AddTagsCommand.ts b/clients/client-emr/commands/AddTagsCommand.ts index 6b3d179a71f7..ab3253c5465e 100644 --- a/clients/client-emr/commands/AddTagsCommand.ts +++ b/clients/client-emr/commands/AddTagsCommand.ts @@ -18,9 +18,9 @@ export type AddTagsCommandInput = AddTagsInput; export type AddTagsCommandOutput = AddTagsOutput & __MetadataBearer; /** - *Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. - * For more information, see Tag Clusters. - *
+ *Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in + * various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. + * For more information, see Tag Clusters.
*/ export class AddTagsCommand extends $CommandCancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a PENDING
state.
Cancels a pending step or steps in a running cluster. Available only in Amazon EMR
+ * versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in
+ * each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee
+ * that a step will be canceled, even if the request is successfully submitted. You can only
+ * cancel steps that are in a PENDING
state.
Creates a security configuration, which is stored in the service and can be specified when a cluster is created.
+ *Creates a security configuration, which is stored in the service and can be specified + * when a cluster is created.
*/ export class CreateSecurityConfigurationCommand extends $Command< CreateSecurityConfigurationCommandInput, diff --git a/clients/client-emr/commands/CreateStudioCommand.ts b/clients/client-emr/commands/CreateStudioCommand.ts new file mode 100644 index 000000000000..ac4d81960459 --- /dev/null +++ b/clients/client-emr/commands/CreateStudioCommand.ts @@ -0,0 +1,92 @@ +import { EMRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EMRClient"; +import { CreateStudioInput, CreateStudioOutput } from "../models/models_0"; +import { + deserializeAws_json1_1CreateStudioCommand, + serializeAws_json1_1CreateStudioCommand, +} from "../protocols/Aws_json1_1"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export type CreateStudioCommandInput = CreateStudioInput; +export type CreateStudioCommandOutput = CreateStudioOutput & __MetadataBearer; + +/** + *The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Creates a new Amazon EMR Studio.
+ */ +export class CreateStudioCommand extends $Command< + CreateStudioCommandInput, + CreateStudioCommandOutput, + EMRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateStudioCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Maps a user or group to the Amazon EMR Studio specified by StudioId
, and
+ * applies a session policy to refine Studio permissions for that user or group.
The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Removes an Amazon EMR Studio from the Studio metadata store.
+ */ +export class DeleteStudioCommand extends $Command< + DeleteStudioCommandInput, + DeleteStudioCommandOutput, + EMRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteStudioCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Removes a user or group from an Amazon EMR Studio.
+ */ +export class DeleteStudioSessionMappingCommand extends $Command< + DeleteStudioSessionMappingCommandInput, + DeleteStudioSessionMappingCommandOutput, + EMRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteStudioSessionMappingCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackProvides cluster-level details including status, hardware and software configuration, VPC settings, and so on.
+ *Provides cluster-level details including status, hardware and software configuration, + * VPC settings, and so on.
*/ export class DescribeClusterCommand extends $Command< DescribeClusterCommandInput, diff --git a/clients/client-emr/commands/DescribeJobFlowsCommand.ts b/clients/client-emr/commands/DescribeJobFlowsCommand.ts index 6daa517d0c92..06ed4b70badc 100644 --- a/clients/client-emr/commands/DescribeJobFlowsCommand.ts +++ b/clients/client-emr/commands/DescribeJobFlowsCommand.ts @@ -21,19 +21,23 @@ export type DescribeJobFlowsCommandInput = DescribeJobFlowsInput; export type DescribeJobFlowsCommandOutput = DescribeJobFlowsOutput & __MetadataBearer; /** - *This API is deprecated and will eventually be removed. We recommend you use ListClusters, - * DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions - * instead.
- *DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.
- *Regardless of supplied parameters, only job flows created within the last two months are returned.
- *If no parameters are supplied, then job flows matching either of the following criteria are returned:
+ *This API is no longer supported and will eventually be removed. We recommend you use + * ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.
+ *DescribeJobFlows returns a list of job flows that match all of the supplied parameters. + * The parameters can include a list of job flow IDs, job flow states, and restrictions on job + * flow creation date and time.
+ *Regardless of supplied parameters, only job flows created within the last two months are + * returned.
+ *If no parameters are supplied, then job flows matching either of the following criteria + * are returned:
*Job flows created and completed in the last two weeks
* Job flows created within the last two months that are in one of the following states: RUNNING
, WAITING
, SHUTTING_DOWN
,
- * STARTING
+ *
Job flows created within the last two months that are in one of the following
+ * states: RUNNING
, WAITING
, SHUTTING_DOWN
,
+ * STARTING
*
Provides the details of a security configuration by returning the configuration JSON.
+ *Provides the details of a security configuration by returning the configuration + * JSON.
*/ export class DescribeSecurityConfigurationCommand extends $Command< DescribeSecurityConfigurationCommandInput, diff --git a/clients/client-emr/commands/DescribeStudioCommand.ts b/clients/client-emr/commands/DescribeStudioCommand.ts new file mode 100644 index 000000000000..fdf3ef0eb420 --- /dev/null +++ b/clients/client-emr/commands/DescribeStudioCommand.ts @@ -0,0 +1,93 @@ +import { EMRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EMRClient"; +import { DescribeStudioInput, DescribeStudioOutput } from "../models/models_0"; +import { + deserializeAws_json1_1DescribeStudioCommand, + serializeAws_json1_1DescribeStudioCommand, +} from "../protocols/Aws_json1_1"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export type DescribeStudioCommandInput = DescribeStudioInput; +export type DescribeStudioCommandOutput = DescribeStudioOutput & __MetadataBearer; + +/** + *The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio + * access URL, and so on.
+ */ +export class DescribeStudioCommand extends $Command< + DescribeStudioCommandInput, + DescribeStudioCommandOutput, + EMRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeStudioCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
+ *Returns the Amazon EMR block public access configuration for your AWS account in the + * current Region. For more information see Configure Block + * Public Access for Amazon EMR in the Amazon EMR Management + * Guide.
*/ export class GetBlockPublicAccessConfigurationCommand extends $Command< GetBlockPublicAccessConfigurationCommandInput, diff --git a/clients/client-emr/commands/GetManagedScalingPolicyCommand.ts b/clients/client-emr/commands/GetManagedScalingPolicyCommand.ts index 3444fb92185a..119086e5581f 100644 --- a/clients/client-emr/commands/GetManagedScalingPolicyCommand.ts +++ b/clients/client-emr/commands/GetManagedScalingPolicyCommand.ts @@ -21,9 +21,7 @@ export type GetManagedScalingPolicyCommandInput = GetManagedScalingPolicyInput; export type GetManagedScalingPolicyCommandOutput = GetManagedScalingPolicyOutput & __MetadataBearer; /** - *- * Fetches the attached managed scaling policy for an Amazon EMR cluster. - *
+ *Fetches the attached managed scaling policy for an Amazon EMR cluster.
*/ export class GetManagedScalingPolicyCommand extends $Command< GetManagedScalingPolicyCommandInput, diff --git a/clients/client-emr/commands/GetStudioSessionMappingCommand.ts b/clients/client-emr/commands/GetStudioSessionMappingCommand.ts new file mode 100644 index 000000000000..dbec7e1dabd2 --- /dev/null +++ b/clients/client-emr/commands/GetStudioSessionMappingCommand.ts @@ -0,0 +1,93 @@ +import { EMRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EMRClient"; +import { GetStudioSessionMappingInput, GetStudioSessionMappingOutput } from "../models/models_0"; +import { + deserializeAws_json1_1GetStudioSessionMappingCommand, + serializeAws_json1_1GetStudioSessionMappingCommand, +} from "../protocols/Aws_json1_1"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export type GetStudioSessionMappingCommandInput = GetStudioSessionMappingInput; +export type GetStudioSessionMappingCommandOutput = GetStudioSessionMappingOutput & __MetadataBearer; + +/** + *The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Fetches mapping details for the specified Amazon EMR Studio and identity (user or + * group).
+ */ +export class GetStudioSessionMappingCommand extends $Command< + GetStudioSessionMappingCommandInput, + GetStudioSessionMappingCommandOutput, + EMRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetStudioSessionMappingCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackProvides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.
+ *Provides the status of all clusters visible to this AWS account. Allows you to filter + * the list of clusters based on certain criteria; for example, filtering by cluster creation + * date and time or by status. This call returns a maximum of 50 clusters per call, but + * returns a marker to track the paging of the cluster list across multiple ListClusters + * calls.
*/ export class ListClustersCommand extends $Command< ListClustersCommandInput, diff --git a/clients/client-emr/commands/ListInstanceFleetsCommand.ts b/clients/client-emr/commands/ListInstanceFleetsCommand.ts index 73088369f378..ccf5c52366ab 100644 --- a/clients/client-emr/commands/ListInstanceFleetsCommand.ts +++ b/clients/client-emr/commands/ListInstanceFleetsCommand.ts @@ -23,7 +23,8 @@ export type ListInstanceFleetsCommandOutput = ListInstanceFleetsOutput & __Metad /** *Lists all available details about the instance fleets in a cluster.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
+ *Provides information for all active EC2 instances and EC2 instances terminated in the + * last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are + * considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.
*/ export class ListInstancesCommand extends $Command< ListInstancesCommandInput, diff --git a/clients/client-emr/commands/ListNotebookExecutionsCommand.ts b/clients/client-emr/commands/ListNotebookExecutionsCommand.ts index d0935c875daf..3d1d32a66054 100644 --- a/clients/client-emr/commands/ListNotebookExecutionsCommand.ts +++ b/clients/client-emr/commands/ListNotebookExecutionsCommand.ts @@ -21,7 +21,10 @@ export type ListNotebookExecutionsCommandInput = ListNotebookExecutionsInput; export type ListNotebookExecutionsCommandOutput = ListNotebookExecutionsOutput & __MetadataBearer; /** - *Provides summaries of all notebook executions. You can filter the list based on multiple criteria such as status, time range, and editor id. Returns a maximum of 50 notebook executions and a marker to track the paging of a longer notebook execution list across multiple ListNotebookExecution
calls.
Provides summaries of all notebook executions. You can filter the list based on multiple
+ * criteria such as status, time range, and editor id. Returns a maximum of 50 notebook
+ * executions and a marker to track the paging of a longer notebook execution list across
+ * multiple ListNotebookExecution
calls.
Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.
+ *Lists all the security configurations visible to this account, providing their creation + * dates and times, and their names. This call returns a maximum of 50 clusters per call, but + * returns a marker to track the paging of the cluster list across multiple + * ListSecurityConfigurations calls.
*/ export class ListSecurityConfigurationsCommand extends $Command< ListSecurityConfigurationsCommandInput, diff --git a/clients/client-emr/commands/ListStepsCommand.ts b/clients/client-emr/commands/ListStepsCommand.ts index f174d866bad8..aea8faba10a9 100644 --- a/clients/client-emr/commands/ListStepsCommand.ts +++ b/clients/client-emr/commands/ListStepsCommand.ts @@ -18,7 +18,9 @@ export type ListStepsCommandInput = ListStepsInput; export type ListStepsCommandOutput = ListStepsOutput & __MetadataBearer; /** - *Provides a list of steps for the cluster in reverse order unless you specify stepIds
with the request of filter by StepStates
. You can specify a maximum of ten stepIDs
.
Provides a list of steps for the cluster in reverse order unless you specify
+ * stepIds
with the request of filter by StepStates
. You can
+ * specify a maximum of ten stepIDs
.
The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Returns a list of all user or group session mappings for the EMR Studio specified by
+ * StudioId
.
The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Returns a list of all Amazon EMR Studios associated with the AWS account. The list + * includes details such as ID, Studio Access URL, and creation time for each Studio.
+ */ +export class ListStudiosCommand extends $Command< + ListStudiosCommandInput, + ListStudiosCommandOutput, + EMRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListStudiosCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackModifies the number of steps that can be executed concurrently for the cluster specified using ClusterID.
+ *Modifies the number of steps that can be executed concurrently for the cluster specified + * using ClusterID.
*/ export class ModifyClusterCommand extends $Command< ModifyClusterCommandInput, diff --git a/clients/client-emr/commands/ModifyInstanceFleetCommand.ts b/clients/client-emr/commands/ModifyInstanceFleetCommand.ts index 6585b495a188..bbcee89b950d 100644 --- a/clients/client-emr/commands/ModifyInstanceFleetCommand.ts +++ b/clients/client-emr/commands/ModifyInstanceFleetCommand.ts @@ -21,9 +21,12 @@ export type ModifyInstanceFleetCommandInput = ModifyInstanceFleetInput; export type ModifyInstanceFleetCommandOutput = __MetadataBearer; /** - *Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.
+ *Modifies the target On-Demand and target Spot capacities for the instance fleet with the + * specified InstanceFleetID within the cluster specified using ClusterID. The call either + * succeeds or fails atomically.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.
+ *ModifyInstanceGroups modifies the number of nodes and configuration settings of an + * instance group. The input parameters include the new target instance count for the group + * and the instance group ID. The call will either succeed or fail atomically.
*/ export class ModifyInstanceGroupsCommand extends $Command< ModifyInstanceGroupsCommandInput, diff --git a/clients/client-emr/commands/PutAutoScalingPolicyCommand.ts b/clients/client-emr/commands/PutAutoScalingPolicyCommand.ts index fdc22c6ba2fb..f7e4f6e4877b 100644 --- a/clients/client-emr/commands/PutAutoScalingPolicyCommand.ts +++ b/clients/client-emr/commands/PutAutoScalingPolicyCommand.ts @@ -21,7 +21,10 @@ export type PutAutoScalingPolicyCommandInput = PutAutoScalingPolicyInput; export type PutAutoScalingPolicyCommandOutput = PutAutoScalingPolicyOutput & __MetadataBearer; /** - *Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric.
+ *Creates or updates an automatic scaling policy for a core instance group or task + * instance group in an Amazon EMR cluster. The automatic scaling policy defines how an + * instance group dynamically adds and terminates EC2 instances in response to the value of a + * CloudWatch metric.
*/ export class PutAutoScalingPolicyCommand extends $Command< PutAutoScalingPolicyCommandInput, diff --git a/clients/client-emr/commands/PutBlockPublicAccessConfigurationCommand.ts b/clients/client-emr/commands/PutBlockPublicAccessConfigurationCommand.ts index 9f45805fab0a..caaeeae39556 100644 --- a/clients/client-emr/commands/PutBlockPublicAccessConfigurationCommand.ts +++ b/clients/client-emr/commands/PutBlockPublicAccessConfigurationCommand.ts @@ -21,7 +21,10 @@ export type PutBlockPublicAccessConfigurationCommandInput = PutBlockPublicAccess export type PutBlockPublicAccessConfigurationCommandOutput = PutBlockPublicAccessConfigurationOutput & __MetadataBearer; /** - *Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.
+ *Creates or updates an Amazon EMR block public access configuration for your AWS account + * in the current Region. For more information see Configure Block + * Public Access for Amazon EMR in the Amazon EMR Management + * Guide.
*/ export class PutBlockPublicAccessConfigurationCommand extends $Command< PutBlockPublicAccessConfigurationCommandInput, diff --git a/clients/client-emr/commands/PutManagedScalingPolicyCommand.ts b/clients/client-emr/commands/PutManagedScalingPolicyCommand.ts index 90b8e743a1e5..4034dfd20805 100644 --- a/clients/client-emr/commands/PutManagedScalingPolicyCommand.ts +++ b/clients/client-emr/commands/PutManagedScalingPolicyCommand.ts @@ -21,9 +21,10 @@ export type PutManagedScalingPolicyCommandInput = PutManagedScalingPolicyInput; export type PutManagedScalingPolicyCommandOutput = PutManagedScalingPolicyOutput & __MetadataBearer; /** - *- * Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration. - *
+ *Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed + * scaling policy defines the limits for resources, such as EC2 instances that can be added or + * terminated from a cluster. The policy only applies to the core and task nodes. The master + * node cannot be scaled after initial configuration.
*/ export class PutManagedScalingPolicyCommand extends $Command< PutManagedScalingPolicyCommandInput, diff --git a/clients/client-emr/commands/RemoveAutoScalingPolicyCommand.ts b/clients/client-emr/commands/RemoveAutoScalingPolicyCommand.ts index 32c40d2b5314..0fbaf9846af0 100644 --- a/clients/client-emr/commands/RemoveAutoScalingPolicyCommand.ts +++ b/clients/client-emr/commands/RemoveAutoScalingPolicyCommand.ts @@ -21,7 +21,8 @@ export type RemoveAutoScalingPolicyCommandInput = RemoveAutoScalingPolicyInput; export type RemoveAutoScalingPolicyCommandOutput = RemoveAutoScalingPolicyOutput & __MetadataBearer; /** - *Removes an automatic scaling policy from a specified instance group within an EMR cluster.
+ *Removes an automatic scaling policy from a specified instance group within an EMR + * cluster.
*/ export class RemoveAutoScalingPolicyCommand extends $Command< RemoveAutoScalingPolicyCommandInput, diff --git a/clients/client-emr/commands/RemoveManagedScalingPolicyCommand.ts b/clients/client-emr/commands/RemoveManagedScalingPolicyCommand.ts index 00699f146357..d0d1bf07bf2e 100644 --- a/clients/client-emr/commands/RemoveManagedScalingPolicyCommand.ts +++ b/clients/client-emr/commands/RemoveManagedScalingPolicyCommand.ts @@ -21,9 +21,7 @@ export type RemoveManagedScalingPolicyCommandInput = RemoveManagedScalingPolicyI export type RemoveManagedScalingPolicyCommandOutput = RemoveManagedScalingPolicyOutput & __MetadataBearer; /** - *- * Removes a managed scaling policy from a specified EMR cluster. - *
+ *Removes a managed scaling policy from a specified EMR cluster.
*/ export class RemoveManagedScalingPolicyCommand extends $Command< RemoveManagedScalingPolicyCommandInput, diff --git a/clients/client-emr/commands/RemoveTagsCommand.ts b/clients/client-emr/commands/RemoveTagsCommand.ts index cf3a4309ccef..57f289a96607 100644 --- a/clients/client-emr/commands/RemoveTagsCommand.ts +++ b/clients/client-emr/commands/RemoveTagsCommand.ts @@ -21,9 +21,9 @@ export type RemoveTagsCommandInput = RemoveTagsInput; export type RemoveTagsCommandOutput = RemoveTagsOutput & __MetadataBearer; /** - *Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. - * For more information, see Tag Clusters. - *
+ *Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in + * various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. + * For more information, see Tag Clusters.
*The following example removes the stack tag with value Prod from a cluster:
*/ export class RemoveTagsCommand extends $Command< diff --git a/clients/client-emr/commands/RunJobFlowCommand.ts b/clients/client-emr/commands/RunJobFlowCommand.ts index 87efa8a25ffc..4f2d3cbc5117 100644 --- a/clients/client-emr/commands/RunJobFlowCommand.ts +++ b/clients/client-emr/commands/RunJobFlowCommand.ts @@ -21,24 +21,28 @@ export type RunJobFlowCommandInput = RunJobFlowInput; export type RunJobFlowCommandOutput = RunJobFlowOutput & __MetadataBearer; /** - *RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps - * specified. After the steps complete, the cluster stops and the HDFS partition is + *
RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the
+ * steps specified. After the steps complete, the cluster stops and the HDFS partition is
* lost. To prevent loss of data, configure the last step of the job flow to store results in
* Amazon S3. If the JobFlowInstancesConfig
- * KeepJobFlowAliveWhenNoSteps
parameter is
- * set to TRUE
, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.
For additional protection, you can set the
- * JobFlowInstancesConfig
- * TerminationProtected
parameter to TRUE
to lock the
- * cluster and prevent it from being
- * terminated by API call, user intervention, or in the event of a job flow error.
KeepJobFlowAliveWhenNoSteps
parameter is set to TRUE
, the cluster
+ * transitions to the WAITING state rather than shutting down after the steps have completed.
+ * For additional protection, you can set the JobFlowInstancesConfig
+ * TerminationProtected
parameter to TRUE
to lock the cluster and
+ * prevent it from being terminated by API call, user intervention, or in the event of a job
+ * flow error.
A maximum of 256 steps are allowed in each job flow.
- *If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.
+ *If your cluster is long-running (such as a Hive data warehouse) or complex, you may + * require more than 256 steps to process your data. You can bypass the 256-step limitation in + * various ways, including using the SSH shell to connect to the master node and submitting + * queries directly to the software running on the master node, such as Hive and Hadoop. For + * more information on how to do this, see Add More than 256 Steps to a + * Cluster in the Amazon EMR Management Guide.
*For long running clusters, we recommend that you periodically store your results.
*The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.
+ *The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets + * parameters or InstanceGroups parameters, but not both.
*SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection
on a cluster is similar to calling the Amazon EC2 DisableAPITermination
API on all EC2 instances in a cluster.
SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster
+ * cannot be terminated by user intervention, an API call, or in the event of a job-flow
+ * error. The cluster still terminates upon successful completion of the job flow. Calling
+ * SetTerminationProtection
on a cluster is similar to calling the Amazon EC2
+ * DisableAPITermination
API on all EC2 instances in a cluster.
- * SetTerminationProtection
is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.
To terminate a cluster that has been locked by setting SetTerminationProtection
to true
,
- * you must first unlock the job flow by a subsequent call to SetTerminationProtection
- * in which you set the value to false
.
For more information, seeManaging Cluster Termination in the - * Amazon EMR Management Guide. - *
+ *SetTerminationProtection
is used to prevent accidental termination of a
+ * cluster and to ensure that in the event of an error, the instances persist so that you can
+ * recover any data stored in their ephemeral instance storage.
+ * To terminate a cluster that has been locked by setting
+ * SetTerminationProtection
to true
, you must first unlock the
+ * job flow by a subsequent call to SetTerminationProtection
in which you set the
+ * value to false
.
For more information, see Managing Cluster + * Termination in the <i>Amazon EMR Management Guide</i>.</p>
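The unlock-then-terminate sequence described above, as a sketch (not part of this patch; JobFlowIds is assumed as the input field name, as in the EMR API):

import { EMRClient, SetTerminationProtectionCommand, TerminateJobFlowsCommand } from "@aws-sdk/client-emr";

async function unlockAndTerminate(jobFlowId: string): Promise<void> {
  const client = new EMRClient({ region: "us-east-1" });
  // A protected cluster must be unlocked before TerminateJobFlows will take effect.
  await client.send(
    new SetTerminationProtectionCommand({ JobFlowIds: [jobFlowId], TerminationProtected: false })
  );
  await client.send(new TerminateJobFlowsCommand({ JobFlowIds: [jobFlowId] }));
}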
*/ export class SetTerminationProtectionCommand extends $Command< SetTerminationProtectionCommandInput, diff --git a/clients/client-emr/commands/SetVisibleToAllUsersCommand.ts b/clients/client-emr/commands/SetVisibleToAllUsersCommand.ts index 5fd45695d456..4bb04b9d290d 100644 --- a/clients/client-emr/commands/SetVisibleToAllUsersCommand.ts +++ b/clients/client-emr/commands/SetVisibleToAllUsersCommand.ts @@ -21,7 +21,15 @@ export type SetVisibleToAllUsersCommandInput = SetVisibleToAllUsersInput; export type SetVisibleToAllUsersCommandOutput = __MetadataBearer; /** - *Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster is visible to all IAM users of the AWS account associated with the cluster. Only the IAM user who created the cluster or the AWS account root user can call this action. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If set to false
, only the IAM user that created the cluster can perform actions. This action works on running clusters. You can override the default true
setting when you create a cluster by using the VisibleToAllUsers
parameter with RunJobFlow
.
Sets the Cluster$VisibleToAllUsers value, which determines whether the
+ * cluster is visible to all IAM users of the AWS account associated with the cluster. Only
+ * the IAM user who created the cluster or the AWS account root user can call this action. The
+ * default value, true
, indicates that all IAM users in the AWS account can
+ * perform cluster actions if they have the proper IAM policy permissions. If set to
+ * false
, only the IAM user that created the cluster can perform actions. This
+ * action works on running clusters. You can override the default true
setting
+ * when you create a cluster by using the VisibleToAllUsers
parameter with
+ * RunJobFlow
.
TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.
- *The maximum number of clusters allowed is 10. The call to TerminateJobFlows
is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.
TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut + * down, any step not yet completed is canceled and the EC2 instances on which the cluster is + * running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri + * was specified when the cluster was created.
+ *The maximum number of clusters allowed is 10. The call to TerminateJobFlows
+ * is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5
+ * minutes for the cluster to completely terminate and release allocated resources, such as
+ * Amazon EC2 instances.
The Amazon EMR Studio APIs are in preview release for Amazon EMR and are subject to + * change.
+ *Updates the session policy attached to the user or group for the specified Amazon EMR + * Studio.
+ */ +export class UpdateStudioSessionMappingCommand extends $Command< + UpdateStudioSessionMappingCommandInput, + UpdateStudioSessionMappingCommandOutput, + EMRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateStudioSessionMappingCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackEBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.
+ *EBS volume specifications such as volume type, IOPS, and size (GiB) that will be + * requested for the EBS volume attached to an EC2 instance in the cluster.
*/ export interface VolumeSpecification { /** @@ -29,7 +30,8 @@ export interface VolumeSpecification { Iops?: number; /** - *The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.
+ *The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume + * type is EBS-optimized, the minimum value is 10.
*/ SizeInGB: number | undefined; } @@ -41,18 +43,21 @@ export namespace VolumeSpecification { } /** - *Configuration of requested EBS block device associated with the instance group with count of volumes that will be associated to every instance.
+ *Configuration of requested EBS block device associated with the instance group with + * count of volumes that will be associated to every instance.
*/ export interface EbsBlockDeviceConfig { /** - *Number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group
+ *EBS volume specifications such as volume type, IOPS, and size (GiB) that will be + * requested for the EBS volume attached to an EC2 instance in the cluster.
*/ - VolumesPerInstance?: number; + VolumeSpecification: VolumeSpecification | undefined; /** - *EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.
+ *Number of EBS volumes with a specific volume configuration that will be associated with + * every instance in the instance group
*/ - VolumeSpecification: VolumeSpecification | undefined; + VolumesPerInstance?: number; } export namespace EbsBlockDeviceConfig { @@ -66,14 +71,14 @@ export namespace EbsBlockDeviceConfig { */ export interface EbsConfiguration { /** - *Indicates whether an Amazon EBS volume is EBS-optimized.
+ *An array of Amazon EBS volume specifications attached to a cluster instance.
*/ - EbsOptimized?: boolean; + EbsBlockDeviceConfigs?: EbsBlockDeviceConfig[]; /** - *An array of Amazon EBS volume specifications attached to a cluster instance.
+ *Indicates whether an Amazon EBS volume is EBS-optimized.
*/ - EbsBlockDeviceConfigs?: EbsBlockDeviceConfig[]; + EbsOptimized?: boolean; } export namespace EbsConfiguration { @@ -87,18 +92,18 @@ export enum OnDemandProvisioningAllocationStrategy { } /** - *- * The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy. - *
+ *The launch specification for On-Demand Instances in the instance fleet, which + * determines the allocation strategy.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand instances allocation strategy is available in Amazon EMR version 5.12.1 and later.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in + * Amazon EMR version 5.12.1 and later.
*- * Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first. - *
+ *Specifies the strategy to use in launching On-Demand Instance fleets. Currently, the + * only option is lowest-price (the default), which launches the lowest price first.
*/ AllocationStrategy: OnDemandProvisioningAllocationStrategy | string | undefined; } @@ -116,34 +121,50 @@ export enum SpotProvisioningAllocationStrategy { export type SpotProvisioningTimeoutAction = "SWITCH_TO_ON_DEMAND" | "TERMINATE_CLUSTER"; /** - *The launch specification for Spot instances in the instance fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.
+ *The launch specification for Spot Instances in the instance fleet, which determines the + * defined duration, provisioning timeout behavior, and allocation strategy.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. Spot instance allocation strategy is available in Amazon EMR version 5.12.1 and later.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions. Spot Instance allocation strategy is available in + * Amazon EMR version 5.12.1 and later.
*The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction
is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.
The spot provisioning timeout period in minutes. If Spot Instances are not provisioned
+ * within this time period, the TimeOutAction
is taken. Minimum value is 5 and
+ * maximum value is 1440. The timeout applies only during initial provisioning, when the
+ * cluster is first created.
- * Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is capacity-optimized (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching. - *
+ *The action to take when TargetSpotCapacity
has not been fulfilled when the
+ * TimeoutDurationMinutes
has expired; that is, when all Spot Instances could
+ * not be provisioned within the Spot provisioning timeout. Valid values are
+ * TERMINATE_CLUSTER
and SWITCH_TO_ON_DEMAND
. SWITCH_TO_ON_DEMAND
+ * specifies that if no Spot Instances are available, On-Demand Instances should be
+ * provisioned to fulfill any remaining Spot capacity.
The action to take when TargetSpotCapacity
has not been fulfilled when the TimeoutDurationMinutes
has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are TERMINATE_CLUSTER
and SWITCH_TO_ON_DEMAND
. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
The defined duration for Spot Instances (also known as Spot blocks) in minutes. When + * specified, the Spot Instance does not terminate before the defined duration expires, and + * defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, + * 300, or 360. The duration period starts as soon as a Spot Instance receives its instance + * ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and + * provides a Spot Instance termination notice, which gives the instance a two-minute warning + * before it terminates.
*/ - TimeoutAction: SpotProvisioningTimeoutAction | string | undefined; + BlockDurationMinutes?: number; /** - *The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates. - *
+ *Specifies the strategy to use in launching Spot Instance fleets. Currently, the only + * option is capacity-optimized (the default), which launches instances from Spot Instance + * pools with optimal capacity for the number of instances that are launching.
*/ - BlockDurationMinutes?: number; + AllocationStrategy?: SpotProvisioningAllocationStrategy | string; } export namespace SpotProvisioningSpecification { @@ -153,23 +174,28 @@ export namespace SpotProvisioningSpecification { } /** - *The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.
+ *The launch specification for Spot Instances in the fleet, which determines the defined + * duration, provisioning timeout behavior, and allocation strategy.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot instance allocation strategies are available in Amazon EMR version 5.12.1 and later.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions. On-Demand and Spot Instance allocation strategies are + * available in Amazon EMR version 5.12.1 and later.
*The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.
+ *The launch specification for Spot Instances in the fleet, which determines the defined + * duration, provisioning timeout behavior, and allocation strategy.
*/ SpotSpecification?: SpotProvisioningSpecification; /** - *- * The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy. - *
+ *The launch specification for On-Demand Instances in the instance fleet, which + * determines the allocation strategy.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand instances allocation strategy is available in Amazon EMR version 5.12.1 and later.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in + * Amazon EMR version 5.12.1 and later.
*The Amazon Resource Name of the cluster.
- */ - ClusterArn?: string; - /** *The unique identifier of the cluster.
*/ @@ -196,6 +217,11 @@ export interface AddInstanceFleetOutput { *The unique identifier of the instance fleet.
*/ InstanceFleetId?: string; + + /** + *The Amazon Resource Name of the cluster.
+ */ + ClusterArn?: string; } export namespace AddInstanceFleetOutput { @@ -229,14 +255,14 @@ export interface InvalidRequestException extends __SmithyException, $MetadataBea name: "InvalidRequestException"; $fault: "client"; /** - *The message associated with the exception.
+ *The error code associated with the exception.
*/ - Message?: string; + ErrorCode?: string; /** - *The error code associated with the exception.
+ *The message associated with the exception.
*/ - ErrorCode?: string; + Message?: string; } export namespace InvalidRequestException { @@ -246,18 +272,24 @@ export namespace InvalidRequestException { } /** - *The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activities triggered by automatic scaling rules will not cause an instance group to grow above or below these limits.
+ *The upper and lower EC2 instance limits for an automatic scaling policy. Automatic + * scaling activities triggered by automatic scaling rules will not cause an instance group to + * grow above or below these limits.
*/ export interface ScalingConstraints { /** - *The upper boundary of EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.
+ *The lower boundary of EC2 instances in an instance group below which scaling activities + * are not allowed to shrink. Scale-in activities will not terminate instances below this + * boundary.
*/ - MaxCapacity: number | undefined; + MinCapacity: number | undefined; /** - *The lower boundary of EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.
+ *The upper boundary of EC2 instances in an instance group beyond which scaling activities + * are not allowed to grow. Scale-out activities will not add instances beyond this + * boundary.
*/ - MinCapacity: number | undefined; + MaxCapacity: number | undefined; } export namespace ScalingConstraints { @@ -278,23 +310,42 @@ export enum AdjustmentType { } /** - *An automatic scaling configuration, which describes how the policy adds or removes instances, the cooldown period, and the number of EC2 instances that will be added each time the CloudWatch metric alarm condition is satisfied.
+ *An automatic scaling configuration, which describes how the policy adds or removes + * instances, the cooldown period, and the number of EC2 instances that will be added each + * time the CloudWatch metric alarm condition is satisfied.
*/ export interface SimpleScalingPolicyConfiguration { /** - *The way in which EC2 instances are added (if ScalingAdjustment
is a positive number) or terminated (if ScalingAdjustment
is a negative number) each time the scaling activity is triggered. CHANGE_IN_CAPACITY
is the default. CHANGE_IN_CAPACITY
indicates that the EC2 instance count increments or decrements by ScalingAdjustment
, which should be expressed as an integer. PERCENT_CHANGE_IN_CAPACITY
indicates the instance count increments or decrements by the percentage specified by ScalingAdjustment
, which should be expressed as an integer. For example, 20 indicates an increase in 20% increments of cluster capacity. EXACT_CAPACITY
indicates the scaling activity results in an instance group with the number of EC2 instances specified by ScalingAdjustment
, which should be expressed as a positive integer.
The way in which EC2 instances are added (if ScalingAdjustment
is a
+ * positive number) or terminated (if ScalingAdjustment
is a negative number)
+ * each time the scaling activity is triggered. CHANGE_IN_CAPACITY
is the
+ * default. CHANGE_IN_CAPACITY
indicates that the EC2 instance count increments
+ * or decrements by ScalingAdjustment
, which should be expressed as an integer.
+ * PERCENT_CHANGE_IN_CAPACITY
indicates the instance count increments or
+ * decrements by the percentage specified by ScalingAdjustment
, which should be
+ * expressed as an integer. For example, 20 indicates an increase in 20% increments of cluster
+ * capacity. EXACT_CAPACITY
indicates the scaling activity results in an instance
+ * group with the number of EC2 instances specified by ScalingAdjustment
, which
+ * should be expressed as a positive integer.
The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start. The default value is 0.
+ *The amount by which to scale in or scale out, based on the specified
+ * AdjustmentType
. A positive value adds to the instance group's EC2 instance
+ * count while a negative number removes instances. If AdjustmentType
is set to
+ * EXACT_CAPACITY
, the number should only be a positive integer. If
+ * AdjustmentType
is set to PERCENT_CHANGE_IN_CAPACITY
, the value
+ * should express the percentage as an integer. For example, -20 indicates a decrease in 20%
+ * increments of cluster capacity.
The amount by which to scale in or scale out, based on the specified AdjustmentType
. A positive value adds to the instance group's EC2 instance count while a negative number removes instances. If AdjustmentType
is set to EXACT_CAPACITY
, the number should only be a positive integer. If AdjustmentType
is set to PERCENT_CHANGE_IN_CAPACITY
, the value should express the percentage as an integer. For example, -20 indicates a decrease in 20% increments of cluster capacity.
The amount of time, in seconds, after a scaling activity completes before any further + * trigger-related scaling activities can start. The default value is 0.
*/ - ScalingAdjustment: number | undefined; + CoolDown?: number; } export namespace SimpleScalingPolicyConfiguration { @@ -304,16 +355,19 @@ export namespace SimpleScalingPolicyConfiguration { } /** - *The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.
+ *The type of adjustment the automatic scaling activity makes when triggered, and the + * periodicity of the adjustment.
*/ export interface ScalingAction { /** - *Not available for instance groups. Instance groups use the market type specified for the group.
+ *Not available for instance groups. Instance groups use the market type specified for the + * group.
*/ Market?: MarketType | string; /** - *The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.
+ *The type of adjustment the automatic scaling activity makes when triggered, and the + * periodicity of the adjustment.
*/ SimpleScalingPolicyConfiguration: SimpleScalingPolicyConfiguration | undefined; } @@ -332,18 +386,22 @@ export enum ComparisonOperator { } /** - *A CloudWatch dimension, which is specified using a Key
(known as a Name
in CloudWatch), Value
pair. By default, Amazon EMR uses one dimension whose Key
is JobFlowID
and Value
is a variable representing the cluster ID, which is ${emr.clusterId}
. This enables the rule to bootstrap when the cluster ID becomes available.
A CloudWatch dimension, which is specified using a Key
(known as a
+ * Name
in CloudWatch), Value
pair. By default, Amazon EMR uses
+ * one dimension whose Key
is JobFlowID
and Value
is a
+ * variable representing the cluster ID, which is ${emr.clusterId}
. This enables
+ * the rule to bootstrap when the cluster ID becomes available.
The dimension value.
+ *The dimension name.
*/ - Value?: string; + Key?: string; /** - *The dimension name.
+ *The dimension value.
*/ - Key?: string; + Value?: string; } export namespace MetricDimension { @@ -391,53 +449,65 @@ export enum Unit { } /** - *The definition of a CloudWatch metric alarm, which determines when an automatic scaling activity is triggered. When the defined alarm conditions are satisfied, scaling activity begins.
+ *The definition of a CloudWatch metric alarm, which determines when an automatic scaling + * activity is triggered. When the defined alarm conditions are satisfied, scaling activity + * begins.
*/ export interface CloudWatchAlarmDefinition { /** - *The namespace for the CloudWatch metric. The default is AWS/ElasticMapReduce
.
Determines how the metric specified by MetricName
is compared to the value
+ * specified by Threshold
.
The value against which the specified statistic is compared.
+ *The number of periods, in five-minute increments, during which the alarm condition must
+ * exist before the alarm triggers automatic scaling activity. The default value is
+ * 1
.
The unit of measure associated with the CloudWatch metric being watched. The value specified for Unit
must correspond to the units specified in the CloudWatch metric.
The name of the CloudWatch metric that is watched to determine an alarm + * condition.
*/ - Unit?: Unit | string; + MetricName: string | undefined; /** - *The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify 300
.
The namespace for the CloudWatch metric. The default is
+ * AWS/ElasticMapReduce
.
Determines how the metric specified by MetricName
is compared to the value specified by Threshold
.
The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are
+ * emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified,
+ * specify 300
.
The name of the CloudWatch metric that is watched to determine an alarm condition.
+ *The statistic to apply to the metric associated with the alarm. The default is
+ * AVERAGE
.
The statistic to apply to the metric associated with the alarm. The default is AVERAGE
.
The value against which the specified statistic is compared.
*/ - Statistic?: Statistic | string; + Threshold: number | undefined; /** - *A CloudWatch metric dimension.
+ *The unit of measure associated with the CloudWatch metric being watched. The value
+ * specified for Unit
must correspond to the units specified in the CloudWatch
+ * metric.
The number of periods, in five-minute increments, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is 1
.
A CloudWatch metric dimension.
*/ - EvaluationPeriods?: number; + Dimensions?: MetricDimension[]; } export namespace CloudWatchAlarmDefinition { @@ -451,7 +521,8 @@ export namespace CloudWatchAlarmDefinition { */ export interface ScalingTrigger { /** - *The definition of a CloudWatch metric alarm. When the defined alarm conditions are met along with other trigger parameters, scaling activity begins.
+ *The definition of a CloudWatch metric alarm. When the defined alarm conditions are met + * along with other trigger parameters, scaling activity begins.
*/ CloudWatchAlarmDefinition: CloudWatchAlarmDefinition | undefined; } @@ -463,28 +534,33 @@ export namespace ScalingTrigger { } /** - *A scale-in or scale-out rule that defines scaling activity, including the CloudWatch metric alarm that triggers activity, how EC2 instances are added or removed, and the periodicity of adjustments. The automatic scaling policy for an instance group can comprise one or more automatic scaling rules.
+ *A scale-in or scale-out rule that defines scaling activity, including the CloudWatch + * metric alarm that triggers activity, how EC2 instances are added or removed, and the + * periodicity of adjustments. The automatic scaling policy for an instance group can comprise + * one or more automatic scaling rules.
*/ export interface ScalingRule { /** - *A friendly, more verbose description of the automatic scaling rule.
+ *The name used to identify an automatic scaling rule. Rule names must be unique within a + * scaling policy.
*/ - Description?: string; + Name: string | undefined; /** - *The CloudWatch alarm definition that determines when automatic scaling activity is triggered.
+ *A friendly, more verbose description of the automatic scaling rule.
*/ - Trigger: ScalingTrigger | undefined; + Description?: string; /** - *The name used to identify an automatic scaling rule. Rule names must be unique within a scaling policy.
+ *The conditions that trigger an automatic scaling activity.
*/ - Name: string | undefined; + Action: ScalingAction | undefined; /** - *The conditions that trigger an automatic scaling activity.
+ *The CloudWatch alarm definition that determines when automatic scaling activity is + * triggered.
*/ - Action: ScalingAction | undefined; + Trigger: ScalingTrigger | undefined; } export namespace ScalingRule { @@ -494,18 +570,23 @@ export namespace ScalingRule { } /** - *An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. An automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.
+ *An automatic scaling policy for a core instance group or task instance group in an + * Amazon EMR cluster. An automatic scaling policy defines how an instance group dynamically + * adds and terminates EC2 instances in response to the value of a CloudWatch metric. See + * PutAutoScalingPolicy.
*/ export interface AutoScalingPolicy { /** - *The scale-in and scale-out rules that comprise the automatic scaling policy.
+ *The upper and lower EC2 instance limits for an automatic scaling policy. Automatic + * scaling activity will not cause an instance group to grow above or below these + * limits.
*/ - Rules: ScalingRule[] | undefined; + Constraints: ScalingConstraints | undefined; /** - *The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.
+ *The scale-in and scale-out rules that comprise the automatic scaling policy.
*/ - Constraints: ScalingConstraints | undefined; + Rules: ScalingRule[] | undefined; } export namespace AutoScalingPolicy { @@ -543,7 +624,8 @@ export namespace AddInstanceGroupsOutput { } /** - *Indicates that an error occurred while processing the request and that the request was not completed.
+ *Indicates that an error occurred while processing the request and that the request was + * not completed.
*/ export interface InternalServerError extends __SmithyException, $MetadataBearer { name: "InternalServerError"; @@ -557,11 +639,11 @@ export namespace InternalServerError { } /** - *A key value pair.
+ *A key-value pair.
*/ export interface KeyValue { /** - *The unique identifier of a key value pair.
+ *The unique identifier of a key-value pair.
*/ Key?: string; @@ -578,28 +660,33 @@ export namespace KeyValue { } /** - *A job flow step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.
+ *A job flow step consisting of a JAR file whose main function will be executed. The main + * function submits a job for Hadoop to execute and waits for the job to finish or + * fail.
*/ export interface HadoopJarStepConfig { /** - *A list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.
+ *A list of Java properties that are set when the step runs. You can use these properties + * to pass key value pairs to your main function.
*/ Properties?: KeyValue[]; /** - *The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.
+ *A path to a JAR file run during the step.
*/ - MainClass?: string; + Jar: string | undefined; /** - *A list of command line arguments passed to the JAR file's main function when executed.
+ *The name of the main class in the specified Java file. If not specified, the JAR file + * should specify a Main-Class in its manifest file.
*/ - Args?: string[]; + MainClass?: string; /** - *A path to a JAR file run during the step.
+ *A list of command line arguments passed to the JAR file's main function when + * executed.
*/ - Jar: string | undefined; + Args?: string[]; } export namespace HadoopJarStepConfig { @@ -613,19 +700,21 @@ export namespace HadoopJarStepConfig { */ export interface StepConfig { /** - *The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.
+ *The name of the step.
*/ - ActionOnFailure?: ActionOnFailure | string; + Name: string | undefined; /** - *The JAR file used for the step.
+ *The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, + * CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. + * We recommend using TERMINATE_CLUSTER instead.
*/ - HadoopJarStep: HadoopJarStepConfig | undefined; + ActionOnFailure?: ActionOnFailure | string; /** - *The name of the step.
+ *The JAR file used for the step.
*/ - Name: string | undefined; + HadoopJarStep: HadoopJarStepConfig | undefined; } export namespace StepConfig { @@ -639,8 +728,8 @@ export namespace StepConfig { */ export interface AddJobFlowStepsInput { /** - *A string that uniquely identifies the job flow. This identifier is returned by - * RunJobFlow and can also be obtained from ListClusters.
+ *A string that uniquely identifies the job flow. This identifier is returned by RunJobFlow and can also be obtained from ListClusters. + *
*/ JobFlowId: string | undefined; @@ -673,24 +762,24 @@ export namespace AddJobFlowStepsOutput { } /** - *A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. - * For more information, see Tag Clusters. - *
+ *A key-value pair containing user-defined metadata that you can associate with an Amazon + * EMR resource. Tags make it easier to associate clusters in various ways, such as grouping + * clusters to track your Amazon EMR resource allocation costs. For more information, see + * Tag + * Clusters.
*/ export interface Tag { /** - *A user-defined value, which is optional in a tag. - * For more information, see Tag Clusters. - *
+ *A user-defined key, which is the minimum required information for a valid tag. For more + * information, see Tag .
*/ - Value?: string; + Key?: string; /** - *A user-defined key, which is the minimum required information for a valid tag. - * For more information, see Tag . - *
+ *A user-defined value, which is optional in a tag. For more information, see Tag + * Clusters.
*/ - Key?: string; + Value?: string; } export namespace Tag { @@ -704,14 +793,17 @@ export namespace Tag { */ export interface AddTagsInput { /** - *A list of tags to associate with a cluster and propagate to EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.
+ *The Amazon EMR resource identifier to which tags will be added. This value must be a + * cluster identifier.
*/ - Tags: Tag[] | undefined; + ResourceId: string | undefined; /** - *The Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.
+ *A list of tags to associate with a cluster and propagate to EC2 instances. Tags are + * user-defined key-value pairs that consist of a required key string with a maximum of 128 + * characters, and an optional value string with a maximum of 256 characters.
*/ - ResourceId: string | undefined; + Tags: Tag[] | undefined; } export namespace AddTagsInput { @@ -732,14 +824,21 @@ export namespace AddTagsOutput { } /** - *With Amazon EMR release version 4.0 and later, the only accepted parameter is the application name. To pass arguments to applications, you use configuration classifications specified using configuration JSON objects. For more information, see Configuring Applications.
- *With earlier Amazon EMR releases, the application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument.
+ *With Amazon EMR release version 4.0 and later, the only accepted parameter is the + * application name. To pass arguments to applications, you use configuration classifications + * specified using configuration JSON objects. For more information, see Configuring + * Applications.
+ *With earlier Amazon EMR releases, the application is any Amazon or third-party software + * that you can add to the cluster. This structure contains a list of strings that indicates + * the software to use with the cluster and accepts a user argument list. Amazon EMR accepts + * and forwards the argument list to the corresponding installation script as bootstrap action + * argument.
*/ export interface Application { /** - *This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.
+ *The name of the application.
*/ - AdditionalInfo?: { [key: string]: string }; + Name?: string; /** *The version of the application.
@@ -747,14 +846,15 @@ export interface Application { Version?: string; /** - *The name of the application.
+ *Arguments for Amazon EMR to pass to the application.
*/ - Name?: string; + Args?: string[]; /** - *Arguments for Amazon EMR to pass to the application.
+ *This option is for advanced users only. This is meta information about third-party + * applications that third-party vendors use for testing purposes.
*/ - Args?: string[]; + AdditionalInfo?: { [key: string]: string }; } export namespace Application { @@ -763,6 +863,11 @@ export namespace Application { }); } +export enum AuthMode { + IAM = "IAM", + SSO = "SSO", +} + export enum AutoScalingPolicyState { ATTACHED = "ATTACHED", ATTACHING = "ATTACHING", @@ -783,14 +888,18 @@ export enum AutoScalingPolicyStateChangeReasonCode { */ export interface AutoScalingPolicyStateChangeReason { /** - *A friendly, more verbose message that accompanies an automatic scaling policy state change.
+ *The code indicating the reason for the change in status.USER_REQUEST
+ * indicates that the scaling policy status was changed by a user.
+ * PROVISION_FAILURE
indicates that the status change was because the policy
+ * failed to provision. CLEANUP_FAILURE
indicates an error.
The code indicating the reason for the change in status.USER_REQUEST
indicates that the scaling policy status was changed by a user. PROVISION_FAILURE
indicates that the status change was because the policy failed to provision. CLEANUP_FAILURE
indicates an error.
A friendly, more verbose message that accompanies an automatic scaling policy state + * change.
*/ - Code?: AutoScalingPolicyStateChangeReasonCode | string; + Message?: string; } export namespace AutoScalingPolicyStateChangeReason { @@ -800,18 +909,19 @@ export namespace AutoScalingPolicyStateChangeReason { } /** - *The status of an automatic scaling policy.
+ *The status of an automatic scaling policy. + *
*/ export interface AutoScalingPolicyStatus { /** - *The reason for a change in status.
+ *Indicates the status of the automatic scaling policy.
*/ - StateChangeReason?: AutoScalingPolicyStateChangeReason; + State?: AutoScalingPolicyState | string; /** - *Indicates the status of the automatic scaling policy.
+ *The reason for a change in status.
*/ - State?: AutoScalingPolicyState | string; + StateChangeReason?: AutoScalingPolicyStateChangeReason; } export namespace AutoScalingPolicyStatus { @@ -821,18 +931,23 @@ export namespace AutoScalingPolicyStatus { } /** - *An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.
+ *An automatic scaling policy for a core instance group or task instance group in an + * Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically + * adds and terminates EC2 instances in response to the value of a CloudWatch metric. See + * PutAutoScalingPolicy.
*/ export interface AutoScalingPolicyDescription { /** - *The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.
+ *The status of an automatic scaling policy.
*/ - Constraints?: ScalingConstraints; + Status?: AutoScalingPolicyStatus; /** - *The status of an automatic scaling policy.
+ *The upper and lower EC2 instance limits for an automatic scaling policy. Automatic + * scaling activity will not cause an instance group to grow above or below these + * limits.
*/ - Status?: AutoScalingPolicyStatus; + Constraints?: ScalingConstraints; /** *The scale-in and scale-out rules that comprise the automatic scaling policy.
@@ -847,7 +962,9 @@ export namespace AutoScalingPolicyDescription { } /** - *A list of port ranges that are permitted to allow inbound traffic from all public IP addresses. To specify a single port, use the same value for MinRange
and MaxRange
.
A list of port ranges that are permitted to allow inbound traffic from all public IP
+ * addresses. To specify a single port, use the same value for MinRange
and
+ * MaxRange
.
Properties that describe the AWS principal that created the BlockPublicAccessConfiguration
using the PutBlockPublicAccessConfiguration
action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.
Properties that describe the AWS principal that created the
+ * BlockPublicAccessConfiguration
using the
+ * PutBlockPublicAccessConfiguration
action as well as the date and time that
+ * the configuration was created. Each time a configuration for block public access is
+ * updated, Amazon EMR updates this metadata.
The Amazon Resource Name that created or last modified the configuration.
+ *The date and time that the configuration was created.
*/ - CreatedByArn: string | undefined; + CreationDateTime: Date | undefined; /** - *The date and time that the configuration was created.
+ *The Amazon Resource Name that created or last modified the configuration.
*/ - CreationDateTime: Date | undefined; + CreatedByArn: string | undefined; } export namespace BlockPublicAccessConfigurationMetadata { @@ -893,7 +1014,8 @@ export namespace BlockPublicAccessConfigurationMetadata { */ export interface ScriptBootstrapActionConfig { /** - *Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system.
+ *Location of the script to run during a bootstrap action. Can be either a location in + * Amazon S3 or on a local file system.
*/ Path: string | undefined; @@ -953,17 +1075,19 @@ export type StepCancellationOption = "SEND_INTERRUPT" | "TERMINATE_PROCESS"; */ export interface CancelStepsInput { /** - *The list of StepIDs
to cancel. Use ListSteps to get steps and their states for the specified cluster.
The ClusterID
for the specified steps that will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.
The ClusterID
for which specified steps will be canceled. Use RunJobFlow and ListClusters to get ClusterIDs.
The list of StepIDs
to cancel. Use ListSteps to get steps
+ * and their states for the specified cluster.
The option to choose for cancelling RUNNING
steps. By default, the value is SEND_INTERRUPT
.
The option to choose to cancel RUNNING
steps. By default, the value is
+ * SEND_INTERRUPT
.
Specification of the status of a CancelSteps request. Available only in Amazon EMR version 4.8.0 and later, excluding version 5.0.0.
+ *Specification of the status of a CancelSteps request. Available only in Amazon EMR + * version 4.8.0 and later, excluding version 5.0.0.
*/ export interface CancelStepsInfo { /** - *The status of a CancelSteps Request. The value may be SUBMITTED or FAILED.
+ *The encrypted StepId of a step.
*/ - Status?: CancelStepsRequestStatus | string; + StepId?: string; /** - *The encrypted StepId of a step.
+ *The status of a CancelSteps Request. The value may be SUBMITTED or FAILED.
*/ - StepId?: string; + Status?: CancelStepsRequestStatus | string; /** *The reason for the failure if the CancelSteps request fails.
@@ -1010,7 +1135,8 @@ export namespace CancelStepsInfo { */ export interface CancelStepsOutput { /** - *A list of CancelStepsInfo, which shows the status of specified cancel requests for each StepID
specified.
A list of CancelStepsInfo, which shows the status of specified cancel
+ * requests for each StepID
specified.
Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.
+ *Provides information about the EC2 instances in a cluster grouped by category. For + * example, key name, subnet ID, IAM instance profile, and so on.
*/ export interface Ec2InstanceAttributes { /** - *The name of the Amazon EC2 key pair to use when connecting with SSH into the master node as a user named "hadoop".
+ *The name of the Amazon EC2 key pair to use when connecting with SSH into the master node + * as a user named "hadoop".
*/ Ec2KeyName?: string; /** - *The identifier of the Amazon EC2 security group for the core and task nodes.
+ *Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster + * to launch. If you do not specify this value, and your account supports EC2-Classic, the + * cluster launches in EC2-Classic.
*/ - EmrManagedSlaveSecurityGroup?: string; + Ec2SubnetId?: string; /** - *The IAM role that was specified when the cluster was launched. The EC2 instances of the cluster assume this role.
+ *Applies to clusters configured with the instance fleets option. Specifies the unique
+ * identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances.
+ * Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit
+ * from among the list of RequestedEc2SubnetIds
, and then launches all cluster
+ * instances within that Subnet. If this value is not specified, and the account and Region
+ * support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and
+ * uses RequestedEc2AvailabilityZones
instead of this setting. If EC2-Classic is
+ * not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you.
+ * RequestedEc2SubnetIDs
and RequestedEc2AvailabilityZones
cannot
+ * be specified together.
Applies to clusters configured with the instance fleets option. Specifies one or more Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from among the list of RequestedEc2AvailabilityZones
, and then launches all cluster instances within that Availability Zone. If you do not specify this value, Amazon EMR chooses the Availability Zone for you. RequestedEc2SubnetIDs
and RequestedEc2AvailabilityZones
cannot be specified together.
The Availability Zone in which the cluster will run.
*/ - RequestedEc2AvailabilityZones?: string[]; + Ec2AvailabilityZone?: string; /** - *Applies to clusters configured with the instance fleets option. Specifies the unique identifier of one or more Amazon EC2 subnets in which to launch EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds
, and then launches all cluster instances within that Subnet. If this value is not specified, and the account and Region support EC2-Classic networks, the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones
instead of this setting. If EC2-Classic is not supported, and no Subnet is specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs
and RequestedEc2AvailabilityZones
cannot be specified together.
Applies to clusters configured with the instance fleets option. Specifies one or more
+ * Availability Zones in which to launch EC2 cluster instances when the EC2-Classic network
+ * configuration is supported. Amazon EMR chooses the Availability Zone with the best fit from
+ * among the list of RequestedEc2AvailabilityZones
, and then launches all cluster
+ * instances within that Availability Zone. If you do not specify this value, Amazon EMR
+ * chooses the Availability Zone for you. RequestedEc2SubnetIDs
and
+ * RequestedEc2AvailabilityZones
cannot be specified together.
Set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, and your account supports EC2-Classic, the cluster launches in EC2-Classic.
+ *The IAM role that was specified when the cluster was launched. The EC2 instances of the + * cluster assume this role.
*/ - Ec2SubnetId?: string; + IamInstanceProfile?: string; /** - *A list of additional Amazon EC2 security group IDs for the core and task nodes.
+ *The identifier of the Amazon EC2 security group for the master node.
*/ - AdditionalSlaveSecurityGroups?: string[]; + EmrManagedMasterSecurityGroup?: string; /** - *The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
+ *The identifier of the Amazon EC2 security group for the core and task nodes.
*/ - ServiceAccessSecurityGroup?: string; + EmrManagedSlaveSecurityGroup?: string; /** - *The Availability Zone in which the cluster will run.
+ *The identifier of the Amazon EC2 security group for the Amazon EMR service to access + * clusters in VPC private subnets.
*/ - Ec2AvailabilityZone?: string; + ServiceAccessSecurityGroup?: string; /** *A list of additional Amazon EC2 security group IDs for the master node.
@@ -1076,9 +1223,9 @@ export interface Ec2InstanceAttributes { AdditionalMasterSecurityGroups?: string[]; /** - *The identifier of the Amazon EC2 security group for the master node.
+ *A list of additional Amazon EC2 security group IDs for the core and task nodes.
*/ - EmrManagedMasterSecurityGroup?: string; + AdditionalSlaveSecurityGroups?: string[]; } export namespace Ec2InstanceAttributes { @@ -1093,29 +1240,35 @@ export enum InstanceCollectionType { } /** - *Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see Use Kerberos Authentication in the EMR Management Guide.
+ *Attributes for Kerberos configuration when Kerberos authentication is enabled using a + * security configuration. For more information see Use Kerberos Authentication + * in the Amazon EMR Management Guide.
*/ export interface KerberosAttributes { /** - *The name of the Kerberos realm to which all nodes in a cluster belong. For example, EC2.INTERNAL
.
- *
The name of the Kerberos realm to which all nodes in a cluster belong. For example,
+ * EC2.INTERNAL
.
Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms.
+ *The password used within the cluster for the kadmin service on the cluster-dedicated + * KDC, which maintains Kerberos principals, password policies, and keytabs for the + * cluster.
*/ - CrossRealmTrustPrincipalPassword?: string; + KdcAdminPassword: string | undefined; /** - *Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain.
+ *Required only when establishing a cross-realm trust with a KDC in a different realm. The + * cross-realm principal password, which must be identical across realms.
*/ - ADDomainJoinUser?: string; + CrossRealmTrustPrincipalPassword?: string; /** - *The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster.
+ *Required only when establishing a cross-realm trust with an Active Directory domain. A + * user with sufficient privileges to join resources to the domain.
*/ - KdcAdminPassword: string | undefined; + ADDomainJoinUser?: string; /** *The Active Directory password for ADDomainJoinUser
.
Placement group configuration for an Amazon EMR cluster. The configuration specifies the placement strategy that can be applied to instance roles during cluster creation.
- *To use this configuration, consider attaching managed policy AmazonElasticMapReducePlacementGroupPolicy to the EMR role.
+ *Placement group configuration for an Amazon EMR cluster. The configuration specifies the + * placement strategy that can be applied to instance roles during cluster creation.
+ *To use this configuration, consider attaching managed policy + * AmazonElasticMapReducePlacementGroupPolicy to the EMR role.
*/ export interface PlacementGroupConfig { /** *Role of the instance in the cluster.
- *Starting with Amazon EMR version 5.23.0, the only supported instance role is MASTER
.
Starting with Amazon EMR version 5.23.0, the only supported instance role is
+ * MASTER
.
EC2 Placement Group strategy associated with instance role.
- *Starting with Amazon EMR version 5.23.0, the only supported placement strategy is SPREAD
for the MASTER
instance role.
Starting with Amazon EMR version 5.23.0, the only supported placement strategy is
+ * SPREAD
for the MASTER
instance role.
The descriptive message for the state change reason.
+ *The programmatic code for the state change reason.
*/ - Message?: string; + Code?: ClusterStateChangeReasonCode | string; /** - *The programmatic code for the state change reason.
+ *The descriptive message for the state change reason.
*/ - Code?: ClusterStateChangeReasonCode | string; + Message?: string; } export namespace ClusterStateChangeReason { @@ -1217,14 +1374,14 @@ export namespace ClusterStateChangeReason { */ export interface ClusterTimeline { /** - *The date and time when the cluster was ready to execute steps.
+ *The creation date and time of the cluster.
*/ - ReadyDateTime?: Date; + CreationDateTime?: Date; /** - *The creation date and time of the cluster.
+ *The date and time when the cluster was ready to run steps.
*/ - CreationDateTime?: Date; + ReadyDateTime?: Date; /** *The date and time when the cluster was terminated.
@@ -1253,7 +1410,8 @@ export interface ClusterStatus { StateChangeReason?: ClusterStateChangeReason; /** - *A timeline that represents the status of a cluster over the lifetime of the cluster.
+ *A timeline that represents the status of a cluster over the lifetime of the + * cluster.
*/ Timeline?: ClusterTimeline; } @@ -1268,22 +1426,15 @@ export namespace ClusterStatus { *The summary description of the cluster.
*/ export interface ClusterSummary { - /** - *The Amazon Resource Name of the cluster.
- */ - ClusterArn?: string; - /** *The unique identifier for the cluster.
*/ Id?: string; /** - *- * The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. - *
+ *The name of the cluster.
*/ - OutpostArn?: string; + Name?: string; /** *The details about the current status of the cluster.
@@ -1291,14 +1442,23 @@ export interface ClusterSummary { Status?: ClusterStatus; /** - *The name of the cluster.
+ *An approximation of the cost of the cluster, represented in m1.small/hours. This value + * is incremented one time for every hour an m1.small instance runs. Larger instances are + * weighted more, so an EC2 instance that is roughly four times more expensive would result in + * the normalized instance hours being incremented by four. This result is only an + * approximation and does not reflect the actual billing rate.
*/ - Name?: string; + NormalizedInstanceHours?: number; /** - *An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.
+ *The Amazon Resource Name of the cluster.
*/ - NormalizedInstanceHours?: number; + ClusterArn?: string; + + /** + *The Amazon Resource Name (ARN) of the Outpost where the cluster is launched.
+ */ + OutpostArn?: string; } export namespace ClusterSummary { @@ -1317,14 +1477,14 @@ export interface Command { Name?: string; /** - *Arguments for Amazon EMR to pass to the command for execution.
+ *The Amazon S3 location of the command script.
*/ - Args?: string[]; + ScriptPath?: string; /** - *The Amazon S3 location of the command script.
+ *Arguments for Amazon EMR to pass to the command for execution.
*/ - ScriptPath?: string; + Args?: string[]; } export namespace Command { @@ -1340,45 +1500,47 @@ export enum ComputeLimitsUnitType { } /** - *- * The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster can not be above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. - *
+ *The EC2 unit limits for a managed scaling policy. The managed scaling activity of a + * cluster can not be above or below these limits. The limit only applies to the core and task + * nodes. The master node cannot be scaled after initial configuration.
*/ export interface ComputeLimits { /** - *- * The unit type used for specifying a managed scaling policy. - *
+ *The unit type used for specifying a managed scaling policy.
*/ UnitType: ComputeLimitsUnitType | string | undefined; /** - *- * The upper boundary of On-Demand EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot instances. - *
+ *The lower boundary of EC2 units. It is measured through vCPU cores or instances for + * instance groups and measured through units for instance fleets. Managed scaling activities + * are not allowed beyond this boundary. The limit only applies to the core and task nodes. + * The master node cannot be scaled after initial configuration.
*/ - MaximumOnDemandCapacityUnits?: number; + MinimumCapacityUnits: number | undefined; /** - *- * The upper boundary of EC2 units for core node type in a cluster. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes. - *
+ *The upper boundary of EC2 units. It is measured through vCPU cores or instances for + * instance groups and measured through units for instance fleets. Managed scaling activities + * are not allowed beyond this boundary. The limit only applies to the core and task nodes. + * The master node cannot be scaled after initial configuration.
*/ - MaximumCoreCapacityUnits?: number; + MaximumCapacityUnits: number | undefined; /** - *- * The lower boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. - *
+ *The upper boundary of On-Demand EC2 units. It is measured through vCPU cores or + * instances for instance groups and measured through units for instance fleets. The On-Demand + * units are not allowed to scale beyond this boundary. The parameter is used to split + * capacity allocation between On-Demand and Spot Instances.
*/ - MinimumCapacityUnits: number | undefined; + MaximumOnDemandCapacityUnits?: number; /** - *- * The upper boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration. - *
+ *The upper boundary of EC2 units for core node type in a cluster. It is measured through + * vCPU cores or instances for instance groups and measured through units for instance fleets. + * The core units are not allowed to scale beyond this boundary. The parameter is used to + * split capacity allocation between core and task nodes.
*/ - MaximumCapacityUnits: number | undefined; + MaximumCoreCapacityUnits?: number; } export namespace ComputeLimits { @@ -1389,14 +1551,17 @@ export namespace ComputeLimits { export interface CreateSecurityConfigurationInput { /** - *The security configuration details in JSON format. For JSON parameters and examples, see Use Security Configurations to Set Up Cluster Security in the Amazon EMR Management Guide.
+ *The name of the security configuration.
*/ - SecurityConfiguration: string | undefined; + Name: string | undefined; /** - *The name of the security configuration.
+ *The security configuration details in JSON format. For JSON parameters and examples, see + * Use Security + * Configurations to Set Up Cluster Security in the Amazon EMR Management + * Guide.
*/ - Name: string | undefined; + SecurityConfiguration: string | undefined; } export namespace CreateSecurityConfigurationInput { @@ -1406,19 +1571,157 @@ export namespace CreateSecurityConfigurationInput { } export interface CreateSecurityConfigurationOutput { + /** + *The name of the security configuration.
+ */ + Name: string | undefined; + /** *The date and time the security configuration was created.
*/ CreationDateTime: Date | undefined; +} + +export namespace CreateSecurityConfigurationOutput { + export const filterSensitiveLog = (obj: CreateSecurityConfigurationOutput): any => ({ + ...obj, + }); +} +export interface CreateStudioInput { /** - *The name of the security configuration.
+ *A descriptive name for the Amazon EMR Studio.
*/ Name: string | undefined; + + /** + *A detailed description of the Studio.
+ */ + Description?: string; + + /** + *Specifies whether the Studio authenticates users using single sign-on (SSO) or IAM. + * Amazon EMR Studio currently only supports SSO authentication.
+ */ + AuthMode: AuthMode | string | undefined; + + /** + *The ID of the Amazon Virtual Private Cloud (Amazon VPC) to associate with the + * Studio.
+ */ + VpcId: string | undefined; + + /** + *A list of subnet IDs to associate with the Studio. The subnets must belong to the VPC
+ * specified by VpcId
. Studio users can create a Workspace in any of the
+ * specified subnets.
The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a + * way for Amazon EMR Studio to interoperate with other AWS services.
+ */ + ServiceRole: string | undefined; + + /** + *The IAM user role that will be assumed by users and groups logged in to a Studio. The + * permissions attached to this IAM role can be scoped down for each user or group using + * session policies.
+ */ + UserRole: string | undefined; + + /** + *The ID of the Amazon EMR Studio Workspace security group. The Workspace security group
+ * allows outbound network traffic to resources in the Engine security group, and it must be
+ * in the same VPC specified by VpcId
.
The ID of the Amazon EMR Studio Engine security group. The Engine security group allows
+ * inbound network traffic from the Workspace security group, and it must be in the same VPC
+ * specified by VpcId
.
The default Amazon S3 location to back up EMR Studio Workspaces and notebook files. A + * Studio user can select an alternative Amazon S3 location when creating a Workspace.
+ */ + DefaultS3Location?: string; + + /** + *A list of tags to associate with the Studio. Tags are user-defined key-value pairs that + * consist of a required key string with a maximum of 128 characters, and an optional value + * string with a maximum of 256 characters.
+ */ + Tags?: Tag[]; } -export namespace CreateSecurityConfigurationOutput { - export const filterSensitiveLog = (obj: CreateSecurityConfigurationOutput): any => ({ +export namespace CreateStudioInput { + export const filterSensitiveLog = (obj: CreateStudioInput): any => ({ + ...obj, + }); +} + +export interface CreateStudioOutput { + /** + *The ID of the Amazon EMR Studio.
+ */ + StudioId?: string; + + /** + *The unique Studio access URL.
+ */ + Url?: string; +} + +export namespace CreateStudioOutput { + export const filterSensitiveLog = (obj: CreateStudioOutput): any => ({ + ...obj, + }); +} + +export enum IdentityType { + GROUP = "GROUP", + USER = "USER", +} + +export interface CreateStudioSessionMappingInput { + /** + *The ID of the Amazon EMR Studio to which the user or group will be mapped.
+ */ + StudioId: string | undefined; + + /** + *The globally unique identifier (GUID) of the user or group from the AWS SSO Identity
+ * Store. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference.
+ * Either IdentityName
or IdentityId
must be specified.
The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.
+ * Either IdentityName
or IdentityId
must be specified.
Specifies whether the identity to map to the Studio is a user or a group.
+ */ + IdentityType: IdentityType | string | undefined; + + /** + *The Amazon Resource Name (ARN) for the session policy that will be applied to the user + * or group. Session policies refine Studio user permissions without the need to use multiple + * IAM user roles.
+ */ + SessionPolicyArn: string | undefined; +} + +export namespace CreateStudioSessionMappingInput { + export const filterSensitiveLog = (obj: CreateStudioSessionMappingInput): any => ({ ...obj, }); } @@ -1444,6 +1747,51 @@ export namespace DeleteSecurityConfigurationOutput { }); } +export interface DeleteStudioInput { + /** + *The ID of the Amazon EMR Studio.
+ */ + StudioId: string | undefined; +} + +export namespace DeleteStudioInput { + export const filterSensitiveLog = (obj: DeleteStudioInput): any => ({ + ...obj, + }); +} + +export interface DeleteStudioSessionMappingInput { + /** + *The ID of the Amazon EMR Studio.
+ */ + StudioId: string | undefined; + + /** + *The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR
+ * Studio. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference.
+ * Either IdentityName
or IdentityId
must be specified.
The name of the user name or group to remove from the Studio. For more information, see
+ * UserName and DisplayName in the AWS SSO Identity Store API Reference.
+ * Either IdentityName
or IdentityId
must be specified.
Specifies whether the identity to delete from the Studio is a user or a group.
+ */ + IdentityType: IdentityType | string | undefined; +} + +export namespace DeleteStudioSessionMappingInput { + export const filterSensitiveLog = (obj: DeleteStudioSessionMappingInput): any => ({ + ...obj, + }); +} + /** *This input determines which cluster to describe.
*/ @@ -1476,24 +1824,24 @@ export enum JobFlowExecutionState { */ export interface DescribeJobFlowsInput { /** - *Return only job flows created before this date and time.
+ *Return only job flows created after this date and time.
*/ - CreatedBefore?: Date; + CreatedAfter?: Date; /** - *Return only job flows whose state is contained in this list.
+ *Return only job flows created before this date and time.
*/ - JobFlowStates?: (JobFlowExecutionState | string)[]; + CreatedBefore?: Date; /** - *Return only job flows created after this date and time.
+ *Return only job flows whose job flow ID is contained in this list.
*/ - CreatedAfter?: Date; + JobFlowIds?: string[]; /** - *Return only job flows whose job flow ID is contained in this list.
+ *Return only job flows whose state is contained in this list.
*/ - JobFlowIds?: string[]; + JobFlowStates?: (JobFlowExecutionState | string)[]; } export namespace DescribeJobFlowsInput { @@ -1512,14 +1860,9 @@ export interface JobFlowExecutionStatusDetail { State: JobFlowExecutionState | string | undefined; /** - *The date and time when the job flow was ready to start running bootstrap actions.
- */ - ReadyDateTime?: Date; - - /** - *Description of the job flow last changed state.
+ *The creation date and time of the job flow.
*/ - LastStateChangeReason?: string; + CreationDateTime: Date | undefined; /** *The start date and time of the job flow.
@@ -1527,14 +1870,19 @@ export interface JobFlowExecutionStatusDetail { StartDateTime?: Date; /** - *The creation date and time of the job flow.
+ *The date and time when the job flow was ready to start running bootstrap actions.
*/ - CreationDateTime: Date | undefined; + ReadyDateTime?: Date; /** *The completion date and time of the job flow.
*/ EndDateTime?: Date; + + /** + *Description of the job flow last changed state.
+ */ + LastStateChangeReason?: string; } export namespace JobFlowExecutionStatusDetail { @@ -1562,59 +1910,63 @@ export enum InstanceGroupState { */ export interface InstanceGroupDetail { /** - *Friendly name for the instance group.
+ *Unique identifier for the instance group.
*/ - Name?: string; + InstanceGroupId?: string; /** - *State of instance group. The following values are deprecated: STARTING, TERMINATED, and FAILED.
+ *Friendly name for the instance group.
*/ - State: InstanceGroupState | string | undefined; + Name?: string; /** - *The date/time the instance group was created.
+ *Market type of the EC2 instances used to create a cluster node.
*/ - CreationDateTime: Date | undefined; + Market: MarketType | string | undefined; /** - *EC2 instance type.
+ *Instance group role in the cluster
*/ - InstanceType: string | undefined; + InstanceRole: InstanceRoleType | string | undefined; /** - *Target number of instances to run in the instance group.
+ *The bid price for each EC2 Spot Instance type as defined by InstanceType
.
+ * Expressed in USD. If neither BidPrice
nor
+ * BidPriceAsPercentageOfOnDemandPrice
is provided,
+ * BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
Details regarding the state of the instance group.
+ *EC2 instance type.
*/ - LastStateChangeReason?: string; + InstanceType: string | undefined; /** - *Unique identifier for the instance group.
+ *Target number of instances to run in the instance group.
*/ - InstanceGroupId?: string; + InstanceRequestCount: number | undefined; /** - *Instance group role in the cluster
+ *Actual count of running instances.
*/ - InstanceRole: InstanceRoleType | string | undefined; + InstanceRunningCount: number | undefined; /** - *Actual count of running instances.
+ *State of instance group. The following values are deprecated: STARTING, TERMINATED, and + * FAILED.
*/ - InstanceRunningCount: number | undefined; + State: InstanceGroupState | string | undefined; /** - *The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
Details regarding the state of the instance group.
*/ - BidPrice?: string; + LastStateChangeReason?: string; /** - *The date/time the instance group was terminated.
+ *The date/time the instance group was created.
*/ - EndDateTime?: Date; + CreationDateTime: Date | undefined; /** *The date/time the instance group was started.
@@ -1622,14 +1974,14 @@ export interface InstanceGroupDetail { StartDateTime?: Date; /** - *Market type of the EC2 instances used to create a cluster node.
+ *The date/time the instance group was available to the cluster.
*/ - Market: MarketType | string | undefined; + ReadyDateTime?: Date; /** - *The date/time the instance group was available to the cluster.
+ *The date/time the instance group was terminated.
*/ - ReadyDateTime?: Date; + EndDateTime?: Date; } export namespace InstanceGroupDetail { @@ -1643,14 +1995,20 @@ export namespace InstanceGroupDetail { */ export interface PlacementType { /** - *The Amazon EC2 Availability Zone for the cluster. AvailabilityZone
is used for uniform instance groups, while AvailabilityZones
(plural) is used for instance fleets.
The Amazon EC2 Availability Zone for the cluster. AvailabilityZone
is used
+ * for uniform instance groups, while AvailabilityZones
(plural) is used for
+ * instance fleets.
When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. AvailabilityZones
is used for instance fleets, while AvailabilityZone
(singular) is used for uniform instance groups.
When multiple Availability Zones are specified, Amazon EMR evaluates them and launches
+ * instances in the optimal Availability Zone. AvailabilityZones
is used for
+ * instance fleets, while AvailabilityZone
(singular) is used for uniform
+ * instance groups.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*Specify the type of Amazon EC2 instances that the cluster (job flow) runs on.
*/ export interface JobFlowInstancesDetail { + /** + *The Amazon EC2 master node instance type.
+ */ + MasterInstanceType: string | undefined; + + /** + *The DNS name of the master node. If the cluster is on a private subnet, this is the + * private DNS name. On a public subnet, this is the public DNS name.
+ */ + MasterPublicDnsName?: string; + /** *The Amazon EC2 instance identifier of the master node.
*/ @@ -1677,34 +2046,42 @@ export interface JobFlowInstancesDetail { SlaveInstanceType: string | undefined; /** - *The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.
+ *The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance + * serves as both the master and core and task node. If the value is greater than 1, one + * instance is the master node and all others are core and task nodes.
*/ InstanceCount: number | undefined; /** - *The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.
+ *Details about the instance groups in a cluster.
*/ - MasterPublicDnsName?: string; + InstanceGroups?: InstanceGroupDetail[]; /** - *The Amazon EC2 master node instance type.
+ *An approximation of the cost of the cluster, represented in m1.small/hours. This value + * is increased one time for every hour that an m1.small instance runs. Larger instances are + * weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive + * would result in the normalized instance hours being increased incrementally four times. + * This result is only an approximation and does not reflect the actual billing rate.
*/ - MasterInstanceType: string | undefined; + NormalizedInstanceHours?: number; /** - *The Hadoop version for the cluster.
+ *The name of an Amazon EC2 key pair that can be used to connect to the master node using + * SSH.
*/ - HadoopVersion?: string; + Ec2KeyName?: string; /** - *For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the subnet where the cluster was launched.
+ *For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the + * subnet where the cluster was launched.
*/ Ec2SubnetId?: string; /** - *The name of an Amazon EC2 key pair that can be used to ssh to the master node.
+ *The Amazon EC2 Availability Zone for the cluster.
*/ - Ec2KeyName?: string; + Placement?: PlacementType; /** *Specifies whether the cluster should remain available after completing all steps.
@@ -1712,24 +2089,15 @@ export interface JobFlowInstancesDetail { KeepJobFlowAliveWhenNoSteps?: boolean; /** - *Details about the instance groups in a cluster.
- */ - InstanceGroups?: InstanceGroupDetail[]; - - /** - *Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.
+ *Specifies whether the Amazon EC2 instances in the cluster are protected from termination + * by API calls, user intervention, or in the event of a job-flow error.
*/ TerminationProtected?: boolean; /** - *An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour that an m1.small runs. Larger instances are weighted more, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.
- */ - NormalizedInstanceHours?: number; - - /** - *The Amazon EC2 Availability Zone for the cluster.
+ *The Hadoop version for the cluster.
*/ - Placement?: PlacementType; + HadoopVersion?: string; } export namespace JobFlowInstancesDetail { @@ -1756,6 +2124,11 @@ export interface StepExecutionStatusDetail { */ State: StepExecutionState | string | undefined; + /** + *The creation date and time of the step.
+ */ + CreationDateTime: Date | undefined; + /** *The start date and time of the step.
*/ @@ -1770,11 +2143,6 @@ export interface StepExecutionStatusDetail { *A description of the step's current state.
*/ LastStateChangeReason?: string; - - /** - *The creation date and time of the step.
- */ - CreationDateTime: Date | undefined; } export namespace StepExecutionStatusDetail { @@ -1809,14 +2177,14 @@ export namespace StepDetail { */ export interface JobFlowDetail { /** - *The name of the job flow.
+ *The job flow identifier.
*/ - Name: string | undefined; + JobFlowId: string | undefined; /** - *The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
+ *The name of the job flow.
*/ - JobFlowRole?: string; + Name: string | undefined; /** *The location in Amazon S3 where log files for the job are stored.
@@ -1824,19 +2192,17 @@ export interface JobFlowDetail { LogUri?: string; /** - *An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole
. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate EC2 instances in an instance group.
Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false
, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true
when you create a cluster by using the VisibleToAllUsers
parameter of the RunJobFlow
action.
The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is + * only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
*/ - VisibleToAllUsers?: boolean; + LogEncryptionKmsKeyId?: string; /** - *The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
+ *Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and
+ * later, ReleaseLabel
is used. To specify a custom AMI, use
+ * CustomAmiID
.
Describes the execution status of the job flow.
@@ -1844,14 +2210,14 @@ export interface JobFlowDetail { ExecutionStatusDetail: JobFlowExecutionStatusDetail | undefined; /** - *A list of strings set by third party software when the job flow is launched. If you are not using third party software to manage the job flow this value is empty.
+ *Describes the Amazon EC2 instances of the job flow.
*/ - SupportedProducts?: string[]; + Instances: JobFlowInstancesDetail | undefined; /** - *The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
+ *A list of steps run by the job flow.
*/ - ServiceRole?: string; + Steps?: StepDetail[]; /** *A list of the bootstrap actions run by the job flow.
@@ -1859,27 +2225,54 @@ export interface JobFlowDetail { BootstrapActions?: BootstrapActionDetail[]; /** - *A list of steps run by the job flow.
+ *A list of strings set by third-party software when the job flow is launched. If you are + * not using third-party software to manage the job flow, this value is empty.
*/ - Steps?: StepDetail[]; + SupportedProducts?: string[]; /** - *The job flow identifier.
+ *Indicates whether the cluster is visible to all IAM users of the AWS account associated
+ * with the cluster. The default value, true
, indicates that all IAM users in the
+ * AWS account can perform cluster actions if they have the proper IAM policy permissions. If
+ * this value is false
, only the IAM user that created the cluster can perform
+ * actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of
+ * true
when you create a cluster by using the VisibleToAllUsers
+ * parameter of the RunJobFlow
action.
Describes the Amazon EC2 instances of the job flow.
+ *The IAM role that was specified when the job flow was launched. The EC2 instances of the + * job flow assume this role.
*/ - Instances: JobFlowInstancesDetail | undefined; + JobFlowRole?: string; /** - *Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel
is used. To specify a custom AMI, use CustomAmiID
.
The IAM role that is assumed by the Amazon EMR service to access AWS resources on your + * behalf.
*/ - AmiVersion?: string; + ServiceRole?: string; /** - *The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR
indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION
indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION
available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.
An IAM role for automatic scaling policies. The default role is
+ * EMR_AutoScaling_DefaultRole
. The IAM role provides a way for the automatic
+ * scaling feature to get the required permissions it needs to launch and terminate EC2
+ * instances in an instance group.
The way that individual Amazon EC2 instances terminate when an automatic scale-in
+ * activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR
+ * indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of
+ * when the request to terminate the instance was submitted. This option is only available
+ * with Amazon EMR 5.1.0 and later and is the default for clusters created using that version.
+ * TERMINATE_AT_TASK_COMPLETION
indicates that Amazon EMR adds nodes to a deny
+ * list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of
+ * the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes
+ * first and blocks instance termination if it could lead to HDFS corruption.
+ * TERMINATE_AT_TASK_COMPLETION
available only in Amazon EMR version 4.1.0 and
+ * later, and is the default for versions of Amazon EMR earlier than 5.1.0.
Specifies the execution engine (cluster) to run the notebook and perform the notebook execution, for example, an EMR cluster.
+ *Specifies the execution engine (cluster) to run the notebook and perform the notebook + * execution, for example, an EMR cluster.
*/ export interface ExecutionEngineConfig { /** - *An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information see Specifying EC2 Security Groups for EMR Notebooks in the EMR Management Guide.
+ *The unique identifier of the execution engine. For an EMR cluster, this is the cluster + * ID.
*/ - MasterInstanceSecurityGroupId?: string; + Id: string | undefined; /** - *The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.
+ *The type of execution engine. A value of EMR
specifies an EMR
+ * cluster.
The type of execution engine. A value of EMR
specifies an EMR cluster.
An optional unique ID of an EC2 security group to associate with the master instance of + * the EMR cluster for this notebook execution. For more information see Specifying + * EC2 Security Groups for EMR Notebooks in the EMR Management + * Guide.
*/ - Type?: ExecutionEngineType | string; + MasterInstanceSecurityGroupId?: string; } export namespace ExecutionEngineConfig { @@ -1963,65 +2362,70 @@ export enum NotebookExecutionStatus { } /** - *A notebook execution. An execution is a specific instance that an EMR Notebook is run using the StartNotebookExecution
action.
A notebook execution. An execution is a specific instance that an EMR Notebook is run
+ * using the StartNotebookExecution
action.
The timestamp when notebook execution ended.
- */ - EndTime?: Date; - - /** - *The Amazon Resource Name (ARN) of the notebook execution.
+ *The unique identifier of a notebook execution.
*/ - Arn?: string; + NotebookExecutionId?: string; /** - *The unique identifier of the EC2 security group associated with the EMR Notebook instance. For more information see Specifying EC2 Security Groups for EMR Notebooks in the EMR Management Guide.
+ *The unique identifier of the EMR Notebook that is used for the notebook + * execution.
*/ - NotebookInstanceSecurityGroupId?: string; + EditorId?: string; /** - *The unique identifier of a notebook execution.
+ *The execution engine, such as an EMR cluster, used to run the EMR notebook and perform + * the notebook execution.
*/ - NotebookExecutionId?: string; + ExecutionEngine?: ExecutionEngineConfig; /** - *The unique identifier of the EMR Notebook that is used for the notebook execution.
+ *A name for the notebook execution.
*/ - EditorId?: string; + NotebookExecutionName?: string; /** - *The execution engine, such as an EMR cluster, used to run the EMR notebook and perform the notebook execution.
+ *Input parameters in JSON format passed to the EMR Notebook at runtime for + * execution.
*/ - ExecutionEngine?: ExecutionEngineConfig; + NotebookParams?: string; /** *The status of the notebook execution.
*
- * START_PENDING
indicates that the cluster has received the execution request but execution has not begun.
START_PENDING
indicates that the cluster has received the execution
+ * request but execution has not begun.
*
- * STARTING
indicates that the execution is starting on the cluster.
STARTING
indicates that the execution is starting on the
+ * cluster.
*
- * RUNNING
indicates that the execution is being processed by the cluster.
RUNNING
indicates that the execution is being processed by the
+ * cluster.
*
- * FINISHING
indicates that execution processing is in the final stages.
FINISHING
indicates that execution processing is in the final
+ * stages.
*
- * FINISHED
indicates that the execution has completed without error.
FINISHED
indicates that the execution has completed without
+ * error.
*
- * FAILING
indicates that the execution is failing and will not finish successfully.
FAILING
indicates that the execution is failing and will not finish
+ * successfully.
* @@ -2029,49 +2433,62 @@ export interface NotebookExecution { *
- * STOP_PENDING
indicates that the cluster has received a StopNotebookExecution
request and the stop is pending.
STOP_PENDING
indicates that the cluster has received a
+ * StopNotebookExecution
request and the stop is pending.
*
- * STOPPING
indicates that the cluster is in the process of stopping the execution as a result of a StopNotebookExecution
request.
STOPPING
indicates that the cluster is in the process of stopping the
+ * execution as a result of a StopNotebookExecution
request.
*
- * STOPPED
indicates that the execution stopped because of a StopNotebookExecution
request.
STOPPED
indicates that the execution stopped because of a
+ * StopNotebookExecution
request.
* The reason for the latest status change of the notebook execution.
+ *The timestamp when notebook execution started.
*/ - LastStateChangeReason?: string; + StartTime?: Date; /** - *The location of the notebook execution's output file in Amazon S3.
+ *The timestamp when notebook execution ended.
*/ - OutputNotebookURI?: string; + EndTime?: Date; /** - *Input parameters in JSON format passed to the EMR Notebook at runtime for execution.
+ *The Amazon Resource Name (ARN) of the notebook execution.
*/ - NotebookParams?: string; + Arn?: string; /** - *A name for the notebook execution.
+ *The location of the notebook execution's output file in Amazon S3.
*/ - NotebookExecutionName?: string; + OutputNotebookURI?: string; /** - *A list of tags associated with a notebook execution. Tags are user-defined key value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.
+ *The reason for the latest status change of the notebook execution.
*/ - Tags?: Tag[]; + LastStateChangeReason?: string; /** - *The timestamp when notebook execution started.
+ *The unique identifier of the EC2 security group associated with the EMR Notebook + * instance. For more information see Specifying + * EC2 Security Groups for EMR Notebooks in the EMR Management + * Guide.
*/ - StartTime?: Date; + NotebookInstanceSecurityGroupId?: string; + + /** + *A list of tags associated with a notebook execution. Tags are user-defined key-value + * pairs that consist of a required key string with a maximum of 128 characters and an + * optional value string with a maximum of 256 characters.
+ */ + Tags?: Tag[]; } export namespace NotebookExecution { @@ -2151,28 +2568,33 @@ export namespace DescribeStepInput { } /** - *A cluster step consisting of a JAR file whose main function will be executed. The main function submits a job for Hadoop to execute and waits for the job to finish or fail.
+ *A cluster step consisting of a JAR file whose main function will be executed. The main + * function submits a job for Hadoop to execute and waits for the job to finish or + * fail.
*/ export interface HadoopStepConfig { /** - *The name of the main class in the specified Java file. If not specified, the JAR file should specify a main class in its manifest file.
+ *The path to the JAR file that runs during the step.
*/ - MainClass?: string; + Jar?: string; /** - *The list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.
+ *The list of Java properties that are set when the step runs. You can use these + * properties to pass key-value pairs to your main function.
*/ Properties?: { [key: string]: string }; /** - *The list of command line arguments to pass to the JAR file's main function for execution.
+ *The name of the main class in the specified Java file. If not specified, the JAR file + * should specify a main class in its manifest file.
*/ - Args?: string[]; + MainClass?: string; /** - *The path to the JAR file that runs during the step.
+ *The list of command line arguments to pass to the JAR file's main function for + * execution.
*/ - Jar?: string; + Args?: string[]; } export namespace HadoopStepConfig { @@ -2182,21 +2604,26 @@ export namespace HadoopStepConfig { } /** - *The details of the step failure. The service attempts to detect the root cause for many common failures.
+ *The details of the step failure. The service attempts to detect the root cause for many + * common failures.
*/ export interface FailureDetails { /** - *The reason for the step failure. In the case where the service cannot successfully determine the root cause of the failure, it returns "Unknown Error" as a reason.
+ *The reason for the step failure. In the case where the service cannot successfully + * determine the root cause of the failure, it returns "Unknown Error" as a reason.
*/ Reason?: string; /** - *The descriptive message including the error the EMR service has identified as the cause of step failure. This is text from an error log that describes the root cause of the failure.
+ *The descriptive message including the error the Amazon EMR service has identified as the + * cause of step failure. This is text from an error log that describes the root cause of the + * failure.
*/ Message?: string; /** - *The path to the log file where the step failure root cause was originally recorded.
+ *The path to the log file where the step failure root cause was originally + * recorded.
*/ LogFile?: string; } @@ -2224,7 +2651,8 @@ export type StepStateChangeReasonCode = "NONE"; */ export interface StepStateChangeReason { /** - *The programmable code for the state change reason. Note: Currently, the service provides no code for the state change.
+ *The programmable code for the state change reason. Note: Currently, the service provides + * no code for the state change.
*/ Code?: StepStateChangeReasonCode | string; @@ -2244,11 +2672,6 @@ export namespace StepStateChangeReason { *The timeline of the cluster step lifecycle.
*/ export interface StepTimeline { - /** - *The date and time when the cluster step execution completed or failed.
- */ - EndDateTime?: Date; - /** *The date and time when the cluster step was created.
*/ @@ -2258,6 +2681,11 @@ export interface StepTimeline { *The date and time when the cluster step execution started.
*/ StartDateTime?: Date; + + /** + *The date and time when the cluster step execution completed or failed.
+ */ + EndDateTime?: Date; } export namespace StepTimeline { @@ -2271,24 +2699,25 @@ export namespace StepTimeline { */ export interface StepStatus { /** - *The reason for the step execution status change.
+ *The execution state of the cluster step.
*/ - StateChangeReason?: StepStateChangeReason; + State?: StepState | string; /** - *The timeline of the cluster step status over time.
+ *The reason for the step execution status change.
*/ - Timeline?: StepTimeline; + StateChangeReason?: StepStateChangeReason; /** - *The execution state of the cluster step.
+ *The details for the step failure including reason, message, and log file path where the + * root cause was identified.
*/ - State?: StepState | string; + FailureDetails?: FailureDetails; /** - *The details for the step failure including reason, message, and log file path where the root cause was identified.
+ *The timeline of the cluster step status over time.
*/ - FailureDetails?: FailureDetails; + Timeline?: StepTimeline; } export namespace StepStatus { @@ -2302,49 +2731,169 @@ export namespace StepStatus { */ export interface Step { /** - *The identifier of the cluster step.
+ *The identifier of the cluster step.
+ */ + Id?: string; + + /** + *The name of the cluster step.
+ */ + Name?: string; + + /** + *The Hadoop job configuration of the cluster step.
+ */ + Config?: HadoopStepConfig; + + /** + *The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, + * CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. + * We recommend using TERMINATE_CLUSTER instead.
+ */ + ActionOnFailure?: ActionOnFailure | string; + + /** + *The current execution status details of the cluster step.
+ */ + Status?: StepStatus; +} + +export namespace Step { + export const filterSensitiveLog = (obj: Step): any => ({ + ...obj, + }); +} + +/** + *This output contains the description of the cluster step.
+ */ +export interface DescribeStepOutput { + /** + *The step details for the requested step identifier.
+ */ + Step?: Step; +} + +export namespace DescribeStepOutput { + export const filterSensitiveLog = (obj: DescribeStepOutput): any => ({ + ...obj, + }); +} + +export interface DescribeStudioInput { + /** + *The Amazon EMR Studio ID.
+ */ + StudioId: string | undefined; +} + +export namespace DescribeStudioInput { + export const filterSensitiveLog = (obj: DescribeStudioInput): any => ({ + ...obj, + }); +} + +/** + *Details for an Amazon EMR Studio including ID, creation time, name, and so on.
+ */ +export interface Studio { + /** + *The ID of the EMR Studio.
+ */ + StudioId?: string; + + /** + *The Amazon Resource Name (ARN) of the EMR Studio.
+ */ + StudioArn?: string; + + /** + *The name of the EMR Studio.
+ */ + Name?: string; + + /** + *The detailed description of the EMR Studio.
+ */ + Description?: string; + + /** + *Specifies whether the Studio authenticates users using single sign-on (SSO) or + * IAM.
+ */ + AuthMode?: AuthMode | string; + + /** + *The ID of the VPC associated with the EMR Studio.
+ */ + VpcId?: string; + + /** + *The list of IDs of the subnets associated with the Amazon EMR Studio.
+ */ + SubnetIds?: string[]; + + /** + *The name of the IAM role assumed by the Amazon EMR Studio.
+ */ + ServiceRole?: string; + + /** + *The name of the IAM role assumed by users logged in to the Amazon EMR Studio.
+ */ + UserRole?: string; + + /** + *The ID of the Workspace security group associated with the Amazon EMR Studio. The + * Workspace security group allows outbound network traffic to resources in the Engine + * security group and to the internet.
+ */ + WorkspaceSecurityGroupId?: string; + + /** + *The ID of the Engine security group associated with the Amazon EMR Studio. The Engine + * security group allows inbound network traffic from resources in the Workspace security + * group.
*/ - Id?: string; + EngineSecurityGroupId?: string; /** - *The Hadoop job configuration of the cluster step.
+ *The unique access URL of the Amazon EMR Studio.
*/ - Config?: HadoopStepConfig; + Url?: string; /** - *The current execution status details of the cluster step.
+ *The time the Amazon EMR Studio was created.
*/ - Status?: StepStatus; + CreationTime?: Date; /** - *The name of the cluster step.
+ *The default Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook + * files.
*/ - Name?: string; + DefaultS3Location?: string; /** - *The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.
+ *A list of tags associated with the Amazon EMR Studio.
*/ - ActionOnFailure?: ActionOnFailure | string; + Tags?: Tag[]; } -export namespace Step { - export const filterSensitiveLog = (obj: Step): any => ({ +export namespace Studio { + export const filterSensitiveLog = (obj: Studio): any => ({ ...obj, }); } -/** - *This output contains the description of the cluster step.
- */ -export interface DescribeStepOutput { +export interface DescribeStudioOutput { /** - *The step details for the requested step identifier.
+ *The Amazon EMR Studio details.
*/ - Step?: Step; + Studio?: Studio; } -export namespace DescribeStepOutput { - export const filterSensitiveLog = (obj: DescribeStepOutput): any => ({ +export namespace DescribeStudioOutput { + export const filterSensitiveLog = (obj: DescribeStudioOutput): any => ({ ...obj, }); } @@ -2354,7 +2903,8 @@ export namespace DescribeStepOutput { */ export interface EbsBlockDevice { /** - *EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.
+ *EBS volume specifications such as volume type, IOPS, and size (GiB) that will be + * requested for the EBS volume attached to an EC2 instance in the cluster.
*/ VolumeSpecification?: VolumeSpecification; @@ -2401,8 +2951,7 @@ export namespace GetBlockPublicAccessConfigurationInput { export interface GetManagedScalingPolicyInput { /** - *- * Specifies the ID of the cluster for which the managed scaling policy will be fetched. + *
Specifies the ID of the cluster for which the managed scaling policy will be fetched. *
*/ ClusterId: string | undefined; @@ -2415,13 +2964,15 @@ export namespace GetManagedScalingPolicyInput { } /** - *- * Managed scaling policy for an Amazon EMR cluster. The policy specifies the limits for resources that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration. - *
+ *Managed scaling policy for an Amazon EMR cluster. The policy specifies the limits for + * resources that can be added or terminated from a cluster. The policy only applies to the + * core and task nodes. The master node cannot be scaled after initial configuration.
*/ export interface ManagedScalingPolicy { /** - *The EC2 unit limits for a managed scaling policy. The managed scaling activity of a cluster is not allowed to go above or below these limits. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.
+ *The EC2 unit limits for a managed scaling policy. The managed scaling activity of a + * cluster is not allowed to go above or below these limits. The limit only applies to the + * core and task nodes. The master node cannot be scaled after initial configuration.
*/ ComputeLimits?: ComputeLimits; } @@ -2434,9 +2985,7 @@ export namespace ManagedScalingPolicy { export interface GetManagedScalingPolicyOutput { /** - *- * Specifies the managed scaling policy that is attached to an Amazon EMR cluster. - *
+ *Specifies the managed scaling policy that is attached to an Amazon EMR cluster.
*/ ManagedScalingPolicy?: ManagedScalingPolicy; } @@ -2447,6 +2996,100 @@ export namespace GetManagedScalingPolicyOutput { }); } +export interface GetStudioSessionMappingInput { + /** + *The ID of the Amazon EMR Studio.
+ */ + StudioId: string | undefined; + + /** + *The globally unique identifier (GUID) of the user or group. For more information, see
+ * UserId and GroupId in the AWS SSO Identity Store API Reference.
+ * Either IdentityName
or IdentityId
must be specified.
The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.
+ * Either IdentityName
or IdentityId
must be specified.
Specifies whether the identity to fetch is a user or a group.
+ */ + IdentityType: IdentityType | string | undefined; +} + +export namespace GetStudioSessionMappingInput { + export const filterSensitiveLog = (obj: GetStudioSessionMappingInput): any => ({ + ...obj, + }); +} + +/** + *Details for an Amazon EMR Studio session mapping including creation time, user or group + * ID, Studio ID, and so on.
+ */ +export interface SessionMappingDetail { + /** + *The ID of the Amazon EMR Studio.
+ */ + StudioId?: string; + + /** + *The globally unique identifier (GUID) of the user or group.
+ */ + IdentityId?: string; + + /** + *The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API + * Reference.
+ */ + IdentityName?: string; + + /** + *Specifies whether the identity mapped to the Studio is a user or a group.
+ */ + IdentityType?: IdentityType | string; + + /** + *The Amazon Resource Name (ARN) of the session policy associated with the user or + * group.
+ */ + SessionPolicyArn?: string; + + /** + *The time the session mapping was created.
+ */ + CreationTime?: Date; + + /** + *The time the session mapping was last modified.
+ */ + LastModifiedTime?: Date; +} + +export namespace SessionMappingDetail { + export const filterSensitiveLog = (obj: SessionMappingDetail): any => ({ + ...obj, + }); +} + +export interface GetStudioSessionMappingOutput { + /** + *The session mapping details for the specified Amazon EMR Studio and identity, including + * session policy ARN and creation time.
+ */ + SessionMapping?: SessionMappingDetail; +} + +export namespace GetStudioSessionMappingOutput { + export const filterSensitiveLog = (obj: GetStudioSessionMappingOutput): any => ({ + ...obj, + }); +} + /** *This input determines which bootstrap actions to retrieve.
*/ @@ -2490,13 +3133,14 @@ export namespace ListBootstrapActionsOutput { } /** - *This input determines how the ListClusters action filters the list of clusters that it returns.
+ *This input determines how the ListClusters action filters the list of clusters that it + * returns.
*/ export interface ListClustersInput { /** - *The cluster state filters to apply when listing clusters.
+ *The creation date and time beginning value filter for listing clusters.
*/ - ClusterStates?: (ClusterState | string)[]; + CreatedAfter?: Date; /** *The creation date and time end value filter for listing clusters.
@@ -2504,14 +3148,14 @@ export interface ListClustersInput { CreatedBefore?: Date; /** - *The pagination token that indicates the next set of results to retrieve.
+ *The cluster state filters to apply when listing clusters.
*/ - Marker?: string; + ClusterStates?: (ClusterState | string)[]; /** - *The creation date and time beginning value filter for listing clusters.
+ *The pagination token that indicates the next set of results to retrieve.
*/ - CreatedAfter?: Date; + Marker?: string; } export namespace ListClustersInput { @@ -2521,18 +3165,19 @@ export namespace ListClustersInput { } /** - *This contains a ClusterSummaryList with the cluster details; for example, the cluster IDs, names, and status.
+ *This contains a ClusterSummaryList with the cluster details; for example, the cluster + * IDs, names, and status.
*/ export interface ListClustersOutput { /** - *The pagination token that indicates the next set of results to retrieve.
+ *The list of clusters for the account based on the given filters.
*/ - Marker?: string; + Clusters?: ClusterSummary[]; /** - *The list of clusters for the account based on the given filters.
+ *The pagination token that indicates the next set of results to retrieve.
*/ - Clusters?: ClusterSummary[]; + Marker?: string; } export namespace ListClustersOutput { @@ -2543,14 +3188,14 @@ export namespace ListClustersOutput { export interface ListInstanceFleetsInput { /** - *The pagination token that indicates the next set of results to retrieve.
+ *The unique identifier of the cluster.
*/ - Marker?: string; + ClusterId: string | undefined; /** - *The unique identifier of the cluster.
+ *The pagination token that indicates the next set of results to retrieve.
*/ - ClusterId: string | undefined; + Marker?: string; } export namespace ListInstanceFleetsInput { @@ -2579,19 +3224,20 @@ export enum InstanceFleetStateChangeReasonCode { /** *Provides status change reason details for the instance fleet.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*An explanatory message.
+ *A code corresponding to the reason the state change occurred.
*/ - Message?: string; + Code?: InstanceFleetStateChangeReasonCode | string; /** - *A code corresponding to the reason the state change occurred.
+ *An explanatory message.
*/ - Code?: InstanceFleetStateChangeReasonCode | string; + Message?: string; } export namespace InstanceFleetStateChangeReason { @@ -2601,9 +3247,11 @@ export namespace InstanceFleetStateChangeReason { } /** - *Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.
+ *Provides historical timestamps for the instance fleet, including the time of creation, + * the time it became ready to run jobs, and the time of termination.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*The status of the instance fleet.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*
- * PROVISIONING
—The instance fleet is provisioning EC2 resources and is not yet ready to run jobs.
PROVISIONING
—The instance fleet is provisioning EC2 resources and is
+ * not yet ready to run jobs.
*
- * BOOTSTRAPPING
—EC2 instances and other resources have been provisioned and the bootstrap actions specified for the instances are underway.
BOOTSTRAPPING
—EC2 instances and other resources have been provisioned
+ * and the bootstrap actions specified for the instances are underway.
*
- * RUNNING
—EC2 instances and other resources are running. They are either executing jobs or waiting to execute jobs.
RUNNING
—EC2 instances and other resources are running. They are
+ * either executing jobs or waiting to execute jobs.
*
- * RESIZING
—A resize operation is underway. EC2 instances are either being added or removed.
RESIZING
—A resize operation is underway. EC2 instances are either
+ * being added or removed.
*
- * SUSPENDED
—A resize operation could not complete. Existing EC2 instances are running, but instances can't be added or removed.
SUSPENDED
—A resize operation could not complete. Existing EC2
+ * instances are running, but instances can't be added or removed.
* @@ -2665,21 +3319,23 @@ export interface InstanceFleetStatus { *
- * TERMINATED
—The instance fleet is no longer active, and all EC2 instances have been terminated.
TERMINATED
—The instance fleet is no longer active, and all EC2
+ * instances have been terminated.
* Provides historical timestamps for the instance fleet, including the time of creation, the time it became ready to run jobs, and the time of termination.
+ *Provides status change reason details for the instance fleet.
*/ - Timeline?: InstanceFleetTimeline; + StateChangeReason?: InstanceFleetStateChangeReason; /** - *Provides status change reason details for the instance fleet.
+ *Provides historical timestamps for the instance fleet, including the time of creation, + * the time it became ready to run jobs, and the time of termination.
*/ - StateChangeReason?: InstanceFleetStateChangeReason; + Timeline?: InstanceFleetTimeline; } export namespace InstanceFleetStatus { @@ -2716,14 +3372,10 @@ export enum InstanceGroupType { } /** - *Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.
+ *Custom policy for requesting termination protection or termination of specific instances + * when shrinking an instance group.
*/ export interface InstanceResizePolicy { - /** - *Decommissioning timeout override for the specific list of instances to be terminated.
- */ - InstanceTerminationTimeout?: number; - /** *Specific list of instances to be terminated when shrinking an instance group.
*/ @@ -2733,6 +3385,12 @@ export interface InstanceResizePolicy { *Specific list of instances to be protected when shrinking an instance group.
*/ InstancesToProtect?: string[]; + + /** + *Decommissioning timeout override for the specific list of instances to be + * terminated.
+ */ + InstanceTerminationTimeout?: number; } export namespace InstanceResizePolicy { @@ -2742,16 +3400,19 @@ export namespace InstanceResizePolicy { } /** - *Policy for customizing shrink operations. Allows configuration of decommissioning timeout and targeted instance shrinking.
+ *Policy for customizing shrink operations. Allows configuration of decommissioning + * timeout and targeted instance shrinking.
*/ export interface ShrinkPolicy { /** - *The desired timeout for decommissioning an instance. Overrides the default YARN decommissioning timeout.
+ *The desired timeout for decommissioning an instance. Overrides the default YARN + * decommissioning timeout.
*/ DecommissionTimeout?: number; /** - *Custom policy for requesting termination protection or termination of specific instances when shrinking an instance group.
+ *Custom policy for requesting termination protection or termination of specific instances + * when shrinking an instance group.
*/ InstanceResizePolicy?: InstanceResizePolicy; } @@ -2774,14 +3435,14 @@ export enum InstanceGroupStateChangeReasonCode { */ export interface InstanceGroupStateChangeReason { /** - *The status change reason description.
+ *The programmable code for the state change reason.
*/ - Message?: string; + Code?: InstanceGroupStateChangeReasonCode | string; /** - *The programmable code for the state change reason.
+ *The status change reason description.
*/ - Code?: InstanceGroupStateChangeReasonCode | string; + Message?: string; } export namespace InstanceGroupStateChangeReason { @@ -2800,14 +3461,14 @@ export interface InstanceGroupTimeline { CreationDateTime?: Date; /** - *The date and time when the instance group terminated.
+ *The date and time when the instance group became ready to perform tasks.
*/ - EndDateTime?: Date; + ReadyDateTime?: Date; /** - *The date and time when the instance group became ready to perform tasks.
+ *The date and time when the instance group terminated.
*/ - ReadyDateTime?: Date; + EndDateTime?: Date; } export namespace InstanceGroupTimeline { @@ -2821,14 +3482,14 @@ export namespace InstanceGroupTimeline { */ export interface InstanceGroupStatus { /** - *The status change reason details for the instance group.
+ *The current state of the instance group.
*/ - StateChangeReason?: InstanceGroupStateChangeReason; + State?: InstanceGroupState | string; /** - *The current state of the instance group.
+ *The status change reason details for the instance group.
*/ - State?: InstanceGroupState | string; + StateChangeReason?: InstanceGroupStateChangeReason; /** *The timeline of the instance group status over time.
@@ -2855,39 +3516,40 @@ export enum InstanceState { */ export interface ListInstancesInput { /** - *The pagination token that indicates the next set of results to retrieve.
+ *The identifier of the cluster for which to list the instances.
*/ - Marker?: string; + ClusterId: string | undefined; /** - *The type of instance group for which to list the instances.
+ *The identifier of the instance group for which to list the instances.
*/ - InstanceGroupTypes?: (InstanceGroupType | string)[]; + InstanceGroupId?: string; /** - *A list of instance states that will filter the instances returned with this request.
+ *The type of instance group for which to list the instances.
*/ - InstanceStates?: (InstanceState | string)[]; + InstanceGroupTypes?: (InstanceGroupType | string)[]; /** - *The identifier of the cluster for which to list the instances.
+ *The unique identifier of the instance fleet.
*/ - ClusterId: string | undefined; + InstanceFleetId?: string; /** - *The identifier of the instance group for which to list the instances.
+ *The node type of the instance fleet. For example MASTER, CORE, or TASK.
*/ - InstanceGroupId?: string; + InstanceFleetType?: InstanceFleetType | string; /** - *The node type of the instance fleet. For example MASTER, CORE, or TASK.
+ *A list of instance states that will filter the instances returned with this + * request.
*/ - InstanceFleetType?: InstanceFleetType | string; + InstanceStates?: (InstanceState | string)[]; /** - *The unique identifier of the instance fleet.
+ *The pagination token that indicates the next set of results to retrieve.
*/ - InstanceFleetId?: string; + Marker?: string; } export namespace ListInstancesInput { @@ -2909,14 +3571,14 @@ export enum InstanceStateChangeReasonCode { */ export interface InstanceStateChangeReason { /** - *The status change reason description.
+ *The programmable code for the state change reason.
*/ - Message?: string; + Code?: InstanceStateChangeReasonCode | string; /** - *The programmable code for the state change reason.
+ *The status change reason description.
*/ - Code?: InstanceStateChangeReasonCode | string; + Message?: string; } export namespace InstanceStateChangeReason { @@ -2929,11 +3591,6 @@ export namespace InstanceStateChangeReason { *The timeline of the instance lifecycle.
*/ export interface InstanceTimeline { - /** - *The date and time when the instance was terminated.
- */ - EndDateTime?: Date; - /** *The creation date and time of the instance.
*/ @@ -2943,6 +3600,11 @@ export interface InstanceTimeline { *The date and time when the instance was ready to perform tasks.
*/ ReadyDateTime?: Date; + + /** + *The date and time when the instance was terminated.
+ */ + EndDateTime?: Date; } export namespace InstanceTimeline { @@ -2955,11 +3617,6 @@ export namespace InstanceTimeline { *The instance status details.
*/ export interface InstanceStatus { - /** - *The timeline of the instance status over time.
- */ - Timeline?: InstanceTimeline; - /** *The current state of the instance.
*/ @@ -2969,6 +3626,11 @@ export interface InstanceStatus { *The details of the status change reason for the instance.
*/ StateChangeReason?: InstanceStateChangeReason; + + /** + *The timeline of the instance status over time.
+ */ + Timeline?: InstanceTimeline; } export namespace InstanceStatus { @@ -2982,19 +3644,19 @@ export namespace InstanceStatus { */ export interface Instance { /** - *The public DNS name of the instance.
+ *The unique identifier for the instance in Amazon EMR.
*/ - PublicDnsName?: string; + Id?: string; /** - *The list of EBS volumes that are attached to this instance.
+ *The unique identifier of the instance in Amazon EC2.
*/ - EbsVolumes?: EbsVolume[]; + Ec2InstanceId?: string; /** - *The private IP address of the instance.
+ *The public DNS name of the instance.
*/ - PrivateIpAddress?: string; + PublicDnsName?: string; /** *The public IP address of the instance.
@@ -3002,9 +3664,14 @@ export interface Instance { PublicIpAddress?: string; /** - *The unique identifier of the instance in Amazon EC2.
+ *The private DNS name of the instance.
*/ - Ec2InstanceId?: string; + PrivateDnsName?: string; + + /** + *The private IP address of the instance.
+ */ + PrivateIpAddress?: string; /** *The current status of the instance.
@@ -3017,30 +3684,25 @@ export interface Instance { InstanceGroupId?: string; /** - *The private DNS name of the instance.
- */ - PrivateDnsName?: string; - - /** - *The EC2 instance type, for example m3.xlarge
.
The unique identifier of the instance fleet to which an EC2 instance belongs.
*/ - InstanceType?: string; + InstanceFleetId?: string; /** - *The unique identifier for the instance in Amazon EMR.
+ *The instance purchasing option. Valid values are ON_DEMAND
or
+ * SPOT
.
The unique identifier of the instance fleet to which an EC2 instance belongs.
+ *The EC2 instance type, for example m3.xlarge
.
The instance purchasing option. Valid values are ON_DEMAND
or SPOT
.
- *
The list of EBS volumes that are attached to this instance.
*/ - Market?: MarketType | string; + EbsVolumes?: EbsVolume[]; } export namespace Instance { @@ -3065,58 +3727,49 @@ export interface ListInstancesOutput { } export namespace ListInstancesOutput { - export const filterSensitiveLog = (obj: ListInstancesOutput): any => ({ - ...obj, - }); -} - -export interface ListNotebookExecutionsInput { - /** - *The end of time range filter for listing notebook executions. The default is the current timestamp.
- */ - To?: Date; - - /** - *The beginning of time range filter for listing notebook executions. The default is the timestamp of 30 days ago.
- */ - From?: Date; + export const filterSensitiveLog = (obj: ListInstancesOutput): any => ({ + ...obj, + }); +} +export interface ListNotebookExecutionsInput { /** *The unique ID of the editor associated with the notebook execution.
*/ EditorId?: string; - /** - *The pagination token, returned by a previous ListNotebookExecutions
call, that indicates the start of the list for this ListNotebookExecutions
call.
The status filter for listing notebook executions.
*
- * START_PENDING
indicates that the cluster has received the execution request but execution has not begun.
START_PENDING
indicates that the cluster has received the execution
+ * request but execution has not begun.
*
- * STARTING
indicates that the execution is starting on the cluster.
STARTING
indicates that the execution is starting on the
+ * cluster.
*
- * RUNNING
indicates that the execution is being processed by the cluster.
RUNNING
indicates that the execution is being processed by the
+ * cluster.
*
- * FINISHING
indicates that execution processing is in the final stages.
FINISHING
indicates that execution processing is in the final
+ * stages.
*
- * FINISHED
indicates that the execution has completed without error.
FINISHED
indicates that the execution has completed without
+ * error.
*
- * FAILING
indicates that the execution is failing and will not finish successfully.
FAILING
indicates that the execution is failing and will not finish
+ * successfully.
* @@ -3124,19 +3777,41 @@ export interface ListNotebookExecutionsInput { *
- * STOP_PENDING
indicates that the cluster has received a StopNotebookExecution
request and the stop is pending.
STOP_PENDING
indicates that the cluster has received a
+ * StopNotebookExecution
request and the stop is pending.
*
- * STOPPING
indicates that the cluster is in the process of stopping the execution as a result of a StopNotebookExecution
request.
STOPPING
indicates that the cluster is in the process of stopping the
+ * execution as a result of a StopNotebookExecution
request.
*
- * STOPPED
indicates that the execution stopped because of a StopNotebookExecution
request.
STOPPED
indicates that the execution stopped because of a
+ * StopNotebookExecution
request.
* The beginning of time range filter for listing notebook executions. The default is the + * timestamp of 30 days ago.
+ */ + From?: Date; + + /** + *The end of time range filter for listing notebook executions. The default is the current + * timestamp.
+ */ + To?: Date; + + /** + *The pagination token, returned by a previous ListNotebookExecutions
call,
+ * that indicates the start of the list for this ListNotebookExecutions
+ * call.
The name of the notebook execution.
- */ - NotebookExecutionName?: string; - - /** - *The unique identifier of the editor associated with the notebook execution.
- */ - EditorId?: string; - /** *The unique identifier of the notebook execution.
*/ NotebookExecutionId?: string; /** - *The timestamp when notebook execution started.
+ *The unique identifier of the editor associated with the notebook execution.
*/ - StartTime?: Date; + EditorId?: string; /** - *The timestamp when notebook execution started.
+ *The name of the notebook execution.
*/ - EndTime?: Date; + NotebookExecutionName?: string; /** *The status of the notebook execution.
*
- * START_PENDING
indicates that the cluster has received the execution request but execution has not begun.
START_PENDING
indicates that the cluster has received the execution
+ * request but execution has not begun.
*
- * STARTING
indicates that the execution is starting on the cluster.
STARTING
indicates that the execution is starting on the
+ * cluster.
*
- * RUNNING
indicates that the execution is being processed by the cluster.
RUNNING
indicates that the execution is being processed by the
+ * cluster.
*
- * FINISHING
indicates that execution processing is in the final stages.
FINISHING
indicates that execution processing is in the final
+ * stages.
*
- * FINISHED
indicates that the execution has completed without error.
FINISHED
indicates that the execution has completed without
+ * error.
*
- * FAILING
indicates that the execution is failing and will not finish successfully.
FAILING
indicates that the execution is failing and will not finish
+ * successfully.
* @@ -3207,19 +3878,32 @@ export interface NotebookExecutionSummary { *
- * STOP_PENDING
indicates that the cluster has received a StopNotebookExecution
request and the stop is pending.
STOP_PENDING
indicates that the cluster has received a
+ * StopNotebookExecution
request and the stop is pending.
*
- * STOPPING
indicates that the cluster is in the process of stopping the execution as a result of a StopNotebookExecution
request.
STOPPING
indicates that the cluster is in the process of stopping the
+ * execution as a result of a StopNotebookExecution
request.
*
- * STOPPED
indicates that the execution stopped because of a StopNotebookExecution
request.
STOPPED
indicates that the execution stopped because of a
+ * StopNotebookExecution
request.
* The timestamp when notebook execution started.
+ */ + StartTime?: Date; + + /** + *The timestamp when notebook execution started.
+ */ + EndTime?: Date; } export namespace NotebookExecutionSummary { @@ -3230,14 +3914,15 @@ export namespace NotebookExecutionSummary { export interface ListNotebookExecutionsOutput { /** - *A pagination token that a subsequent ListNotebookExecutions
can use to determine the next set of results to retrieve.
A list of notebook executions.
*/ - Marker?: string; + NotebookExecutions?: NotebookExecutionSummary[]; /** - *A list of notebook executions.
+ *A pagination token that a subsequent ListNotebookExecutions
can use to
+ * determine the next set of results to retrieve.
The date and time the security configuration was created.
+ *The name of the security configuration.
*/ - CreationDateTime?: Date; + Name?: string; /** - *The name of the security configuration.
+ *The date and time the security configuration was created.
*/ - Name?: string; + CreationDateTime?: Date; } export namespace SecurityConfigurationSummary { @@ -3287,7 +3972,9 @@ export interface ListSecurityConfigurationsOutput { SecurityConfigurations?: SecurityConfigurationSummary[]; /** - *A pagination token that indicates the next set of results to retrieve. Include the marker in the next ListSecurityConfiguration call to retrieve the next page of results, if required.
+ *A pagination token that indicates the next set of results to retrieve. Include the + * marker in the next ListSecurityConfiguration call to retrieve the next page of results, if + * required.
*/ Marker?: string; } @@ -3303,24 +3990,26 @@ export namespace ListSecurityConfigurationsOutput { */ export interface ListStepsInput { /** - *The pagination token that indicates the next set of results to retrieve.
+ *The identifier of the cluster for which to list the steps.
*/ - Marker?: string; + ClusterId: string | undefined; /** - *The filter to limit the step list based on the identifier of the steps. You can specify a maximum of ten Step IDs. The character constraint applies to the overall length of the array.
+ *The filter to limit the step list based on certain states.
*/ - StepIds?: string[]; + StepStates?: (StepState | string)[]; /** - *The filter to limit the step list based on certain states.
+ *The filter to limit the step list based on the identifier of the steps. You can specify + * a maximum of ten Step IDs. The character constraint applies to the overall length of the + * array.
*/ - StepStates?: (StepState | string)[]; + StepIds?: string[]; /** - *The identifier of the cluster for which to list the steps.
+ *The pagination token that indicates the next set of results to retrieve.
*/ - ClusterId: string | undefined; + Marker?: string; } export namespace ListStepsInput { @@ -3334,29 +4023,31 @@ export namespace ListStepsInput { */ export interface StepSummary { /** - *The Hadoop job configuration of the cluster step.
+ *The identifier of the cluster step.
*/ - Config?: HadoopStepConfig; + Id?: string; /** - *The current execution status details of the cluster step.
+ *The name of the cluster step.
*/ - Status?: StepStatus; + Name?: string; /** - *The name of the cluster step.
+ *The Hadoop job configuration of the cluster step.
*/ - Name?: string; + Config?: HadoopStepConfig; /** - *The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility. We recommend using TERMINATE_CLUSTER instead.
+ *The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, + * CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility. + * We recommend using TERMINATE_CLUSTER instead.
*/ ActionOnFailure?: ActionOnFailure | string; /** - *The identifier of the cluster step.
+ *The current execution status details of the cluster step.
*/ - Id?: string; + Status?: StepStatus; } export namespace StepSummary { @@ -3366,7 +4057,8 @@ export namespace StepSummary { } /** - *This output contains the list of steps returned in reverse order. This means that the last step is the first element in the list.
+ *This output contains the list of steps returned in reverse order. This means that the + * last step is the first element in the list.
*/ export interface ListStepsOutput { /** @@ -3386,16 +4078,180 @@ export namespace ListStepsOutput { }); } -export interface ModifyClusterInput { +export interface ListStudiosInput { /** - *The number of steps that can be executed concurrently. You can specify a maximum of 256 steps.
+ *The pagination token that indicates the set of results to retrieve.
*/ - StepConcurrencyLevel?: number; + Marker?: string; +} + +export namespace ListStudiosInput { + export const filterSensitiveLog = (obj: ListStudiosInput): any => ({ + ...obj, + }); +} + +/** + *Details for an Amazon EMR Studio, including ID, Name, VPC, and Description. The details + * do not include subnets, IAM roles, security groups, or tags associated with the + * Studio.
+ */ +export interface StudioSummary { + /** + *The ID of the Amazon EMR Studio.
+ */ + StudioId?: string; + + /** + *The name of the Amazon EMR Studio.
+ */ + Name?: string; + + /** + *The ID of the Virtual Private Cloud (Amazon VPC) associated with the Amazon EMR + * Studio.
+ */ + VpcId?: string; + + /** + *The detailed description of the EMR Studio.
+ */ + Description?: string; + + /** + *The unique access URL of the Amazon EMR Studio.
+ */ + Url?: string; + + /** + *The time when the Amazon EMR Studio was created.
+ */ + CreationTime?: Date; +} + +export namespace StudioSummary { + export const filterSensitiveLog = (obj: StudioSummary): any => ({ + ...obj, + }); +} + +export interface ListStudiosOutput { + /** + *The list of Studio summary objects.
+ */ + Studios?: StudioSummary[]; + + /** + *The pagination token that indicates the next set of results to retrieve.
+ */ + Marker?: string; +} + +export namespace ListStudiosOutput { + export const filterSensitiveLog = (obj: ListStudiosOutput): any => ({ + ...obj, + }); +} + +export interface ListStudioSessionMappingsInput { + /** + *The ID of the Amazon EMR Studio.
+ */ + StudioId?: string; + + /** + *Specifies whether to return session mappings for users or groups. If not specified, the + * results include session mapping details for both users and groups.
+ */ + IdentityType?: IdentityType | string; + + /** + *The pagination token that indicates the set of results to retrieve.
+ */ + Marker?: string; +} + +export namespace ListStudioSessionMappingsInput { + export const filterSensitiveLog = (obj: ListStudioSessionMappingsInput): any => ({ + ...obj, + }); +} + +/** + *Details for an Amazon EMR Studio session mapping. The details do not include the time + * the session mapping was last modified.
+ */ +export interface SessionMappingSummary { + /** + *The ID of the Amazon EMR Studio.
+ */ + StudioId?: string; + + /** + *The globally unique identifier (GUID) of the user or group from the AWS SSO Identity + * Store.
+ */ + IdentityId?: string; + + /** + *The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API + * Reference.
+ */ + IdentityName?: string; + + /** + *Specifies whether the identity mapped to the Studio is a user or a group.
+ */ + IdentityType?: IdentityType | string; + + /** + *The Amazon Resource Name (ARN) of the session policy associated with the user or + * group.
+ */ + SessionPolicyArn?: string; + + /** + *The time the session mapping was created.
+ */ + CreationTime?: Date; +} + +export namespace SessionMappingSummary { + export const filterSensitiveLog = (obj: SessionMappingSummary): any => ({ + ...obj, + }); +} + +export interface ListStudioSessionMappingsOutput { + /** + *A list of session mapping summary objects. Each object includes session mapping details + * such as creation time, identity type (user or group), and Studio ID.
+ */ + SessionMappings?: SessionMappingSummary[]; + + /** + *The pagination token that indicates the next set of results to retrieve.
+ */ + Marker?: string; +} +export namespace ListStudioSessionMappingsOutput { + export const filterSensitiveLog = (obj: ListStudioSessionMappingsOutput): any => ({ + ...obj, + }); +} + +export interface ModifyClusterInput { /** *The unique identifier of the cluster.
*/ ClusterId: string | undefined; + + /** + *The number of steps that can be executed concurrently. You can specify a maximum of 256 + * steps.
+ */ + StepConcurrencyLevel?: number; } export namespace ModifyClusterInput { @@ -3420,7 +4276,8 @@ export namespace ModifyClusterOutput { /** *Configuration parameters for an instance fleet modification request.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*The target capacity of Spot units for the instance fleet. For more information, see InstanceFleetConfig$TargetSpotCapacity.
+ *The target capacity of On-Demand units for the instance fleet. For more information see + * InstanceFleetConfig$TargetOnDemandCapacity.
*/ - TargetSpotCapacity?: number; + TargetOnDemandCapacity?: number; /** - *The target capacity of On-Demand units for the instance fleet. For more information see InstanceFleetConfig$TargetOnDemandCapacity.
+ *The target capacity of Spot units for the instance fleet. For more information, see + * InstanceFleetConfig$TargetSpotCapacity.
*/ - TargetOnDemandCapacity?: number; + TargetSpotCapacity?: number; } export namespace InstanceFleetModifyConfig { @@ -3448,14 +4307,14 @@ export namespace InstanceFleetModifyConfig { export interface ModifyInstanceFleetInput { /** - *The unique identifier of the instance fleet.
+ *The unique identifier of the cluster.
*/ - InstanceFleet: InstanceFleetModifyConfig | undefined; + ClusterId: string | undefined; /** - *The unique identifier of the cluster.
+ *The unique identifier of the instance fleet.
*/ - ClusterId: string | undefined; + InstanceFleet: InstanceFleetModifyConfig | undefined; } export namespace ModifyInstanceFleetInput { @@ -3466,19 +4325,21 @@ export namespace ModifyInstanceFleetInput { export interface PutAutoScalingPolicyInput { /** - *Specifies the definition of the automatic scaling policy.
+ *Specifies the ID of a cluster. The instance group to which the automatic scaling policy + * is applied is within this cluster.
*/ - AutoScalingPolicy: AutoScalingPolicy | undefined; + ClusterId: string | undefined; /** - *Specifies the ID of a cluster. The instance group to which the automatic scaling policy is applied is within this cluster.
+ *Specifies the ID of the instance group to which the automatic scaling policy is + * applied.
*/ - ClusterId: string | undefined; + InstanceGroupId: string | undefined; /** - *Specifies the ID of the instance group to which the automatic scaling policy is applied.
+ *Specifies the definition of the automatic scaling policy.
*/ - InstanceGroupId: string | undefined; + AutoScalingPolicy: AutoScalingPolicy | undefined; } export namespace PutAutoScalingPolicyInput { @@ -3489,15 +4350,11 @@ export namespace PutAutoScalingPolicyInput { export interface PutAutoScalingPolicyOutput { /** - *Specifies the ID of a cluster. The instance group to which the automatic scaling policy is applied is within this cluster.
+ *Specifies the ID of a cluster. The instance group to which the automatic scaling policy + * is applied is within this cluster.
*/ ClusterId?: string; - /** - *The Amazon Resource Name of the cluster.
- */ - ClusterArn?: string; - /** *Specifies the ID of the instance group to which the scaling policy is applied.
*/ @@ -3507,6 +4364,11 @@ export interface PutAutoScalingPolicyOutput { *The automatic scaling policy definition.
*/ AutoScalingPolicy?: AutoScalingPolicyDescription; + + /** + *The Amazon Resource Name of the cluster.
+ */ + ClusterArn?: string; } export namespace PutAutoScalingPolicyOutput { @@ -3525,16 +4387,12 @@ export namespace PutBlockPublicAccessConfigurationOutput { export interface PutManagedScalingPolicyInput { /** - *- * Specifies the ID of an EMR cluster where the managed scaling policy is attached. - *
+ *Specifies the ID of an EMR cluster where the managed scaling policy is attached.
*/ ClusterId: string | undefined; /** - *- * Specifies the constraints for the managed scaling policy. - *
+ *Specifies the constraints for the managed scaling policy.
*/ ManagedScalingPolicy: ManagedScalingPolicy | undefined; } @@ -3555,14 +4413,15 @@ export namespace PutManagedScalingPolicyOutput { export interface RemoveAutoScalingPolicyInput { /** - *Specifies the ID of the instance group to which the scaling policy is applied.
+ *Specifies the ID of a cluster. The instance group to which the automatic scaling policy + * is applied is within this cluster.
*/ - InstanceGroupId: string | undefined; + ClusterId: string | undefined; /** - *Specifies the ID of a cluster. The instance group to which the automatic scaling policy is applied is within this cluster.
+ *Specifies the ID of the instance group to which the scaling policy is applied.
*/ - ClusterId: string | undefined; + InstanceGroupId: string | undefined; } export namespace RemoveAutoScalingPolicyInput { @@ -3581,8 +4440,7 @@ export namespace RemoveAutoScalingPolicyOutput { export interface RemoveManagedScalingPolicyInput { /** - *- * Specifies the ID of the cluster from which the managed scaling policy will be removed. + *
Specifies the ID of the cluster from which the managed scaling policy will be removed. *
*/ ClusterId: string | undefined; @@ -3607,7 +4465,8 @@ export namespace RemoveManagedScalingPolicyOutput { */ export interface RemoveTagsInput { /** - *The Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.
+ *The Amazon EMR resource identifier from which tags will be removed. This value must be a + * cluster identifier.
*/ ResourceId: string | undefined; @@ -3635,18 +4494,20 @@ export namespace RemoveTagsOutput { } /** - *The list of supported product configurations which allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.
+ *The list of supported product configurations which allow user-supplied arguments. EMR + * accepts these arguments and forwards them to the corresponding installation script as + * bootstrap action arguments.
*/ export interface SupportedProductConfig { /** - *The list of user-supplied arguments.
+ *The name of the product configuration.
*/ - Args?: string[]; + Name?: string; /** - *The name of the product configuration.
+ *The list of user-supplied arguments.
*/ - Name?: string; + Args?: string[]; } export namespace SupportedProductConfig { @@ -3660,14 +4521,14 @@ export namespace SupportedProductConfig { */ export interface RunJobFlowOutput { /** - *The Amazon Resource Name of the cluster.
+ *An unique identifier for the job flow.
*/ - ClusterArn?: string; + JobFlowId?: string; /** - *An unique identifier for the job flow.
+ *The Amazon Resource Name of the cluster.
*/ - JobFlowId?: string; + ClusterArn?: string; } export namespace RunJobFlowOutput { @@ -3681,15 +4542,17 @@ export namespace RunJobFlowOutput { */ export interface SetTerminationProtectionInput { /** - *A Boolean that indicates whether to protect the cluster and prevent the Amazon EC2 instances in the cluster from shutting down due to API calls, user intervention, or job-flow error.
+ *A list of strings that uniquely identify the clusters to protect. This identifier is + * returned by RunJobFlow and can also be obtained from DescribeJobFlows .
*/ - TerminationProtected: boolean | undefined; + JobFlowIds: string[] | undefined; /** - *A list of strings that uniquely identify the clusters to protect. This identifier is returned by - * RunJobFlow and can also be obtained from DescribeJobFlows .
+ *A Boolean that indicates whether to protect the cluster and prevent the Amazon EC2 + * instances in the cluster from shutting down due to API calls, user intervention, or + * job-flow error.
*/ - JobFlowIds: string[] | undefined; + TerminationProtected: boolean | undefined; } export namespace SetTerminationProtectionInput { @@ -3708,7 +4571,10 @@ export interface SetVisibleToAllUsersInput { JobFlowIds: string[] | undefined; /** - *A value of true
indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false
indicates that only the IAM user who created the cluster can perform actions.
A value of true
indicates that all IAM users in the AWS account can perform
+ * cluster actions if they have the proper IAM policy permissions. This is the default. A
+ * value of false
indicates that only the IAM user who created the cluster can
+ * perform actions.
The name or ARN of the IAM role that is used as the service role for Amazon EMR (the EMR role) for the notebook execution.
+ *The unique identifier of the EMR Notebook to use for notebook execution.
*/ - ServiceRole: string | undefined; + EditorId: string | undefined; /** - *A list of tags associated with a notebook execution. Tags are user-defined key value pairs that consist of a required key string with a maximum of 128 characters and an optional value string with a maximum of 256 characters.
+ *The path and file name of the notebook file for this execution, relative to the path
+ * specified for the EMR Notebook. For example, if you specify a path of
+ * s3://MyBucket/MyNotebooks
when you create an EMR Notebook for a notebook
+ * with an ID of e-ABCDEFGHIJK1234567890ABCD
(the EditorID
of this
+ * request), and you specify a RelativePath
of
+ * my_notebook_executions/notebook_execution.ipynb
, the location of the file
+ * for the notebook execution is
+ * s3://MyBucket/MyNotebooks/e-ABCDEFGHIJK1234567890ABCD/my_notebook_executions/notebook_execution.ipynb
.
An optional name for the notebook execution.
@@ -3736,12 +4609,8 @@ export interface StartNotebookExecutionInput { NotebookExecutionName?: string; /** - *The path and file name of the notebook file for this execution, relative to the path specified for the EMR Notebook. For example, if you specify a path of s3://MyBucket/MyNotebooks
when you create an EMR Notebook for a notebook with an ID of e-ABCDEFGHIJK1234567890ABCD
(the EditorID
of this request), and you specify a RelativePath
of my_notebook_executions/notebook_execution.ipynb
, the location of the file for the notebook execution is s3://MyBucket/MyNotebooks/e-ABCDEFGHIJK1234567890ABCD/my_notebook_executions/notebook_execution.ipynb
.
Input parameters in JSON format passed to the EMR Notebook at runtime for execution.
+ *Input parameters in JSON format passed to the EMR Notebook at runtime for + * execution.
*/ NotebookParams?: string; @@ -3751,14 +4620,23 @@ export interface StartNotebookExecutionInput { ExecutionEngine: ExecutionEngineConfig | undefined; /** - *The unique identifier of the EMR Notebook to use for notebook execution.
+ *The name or ARN of the IAM role that is used as the service role for Amazon EMR (the EMR + * role) for the notebook execution.
*/ - EditorId: string | undefined; + ServiceRole: string | undefined; /** - *The unique identifier of the Amazon EC2 security group to associate with the EMR Notebook for this notebook execution.
+ *The unique identifier of the Amazon EC2 security group to associate with the EMR + * Notebook for this notebook execution.
*/ NotebookInstanceSecurityGroupId?: string; + + /** + *A list of tags associated with a notebook execution. Tags are user-defined key-value + * pairs that consist of a required key string with a maximum of 128 characters and an + * optional value string with a maximum of 256 characters.
+ */ + Tags?: Tag[]; } export namespace StartNotebookExecutionInput { @@ -3798,7 +4676,7 @@ export namespace StopNotebookExecutionInput { */ export interface TerminateJobFlowsInput { /** - *A list of job flows to be shutdown.
+ *A list of job flows to be shut down.
*/ JobFlowIds: string[] | undefined; } @@ -3809,11 +4687,53 @@ export namespace TerminateJobFlowsInput { }); } +export interface UpdateStudioSessionMappingInput { + /** + *The ID of the EMR Studio.
+ */ + StudioId: string | undefined; + + /** + *The globally unique identifier (GUID) of the user or group. For more information, see
+ * UserId and GroupId in the AWS SSO Identity Store API Reference.
+ * Either IdentityName
or IdentityId
must be specified.
The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.
+ * Either IdentityName
or IdentityId
must be specified.
Specifies whether the identity to update is a user or a group.
+ */ + IdentityType: IdentityType | string | undefined; + + /** + *The Amazon Resource Name (ARN) of the session policy to associate with the specified + * user or group.
+ */ + SessionPolicyArn: string | undefined; +} + +export namespace UpdateStudioSessionMappingInput { + export const filterSensitiveLog = (obj: UpdateStudioSessionMappingInput): any => ({ + ...obj, + }); +} + /** *Amazon EMR releases 4.x or later.
*An optional configuration specification to be used when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file. For more information, see Configuring Applications.
+ *An optional configuration specification to be used when provisioning cluster instances, + * which can include configurations for applications and software bundled with Amazon EMR. A + * configuration consists of a classification, properties, and optional nested configurations. + * A classification refers to an application-specific configuration file. Properties are the + * settings you want to change in that file. For more information, see Configuring + * Applications.
*/ export interface Configuration { /** @@ -3822,14 +4742,14 @@ export interface Configuration { Classification?: string; /** - *A set of properties specified within a configuration classification.
+ *A list of additional configurations to apply within a configuration object.
*/ - Properties?: { [key: string]: string }; + Configurations?: Configuration[]; /** - *A list of additional configurations to apply within a configuration object.
+ *A set of properties specified within a configuration classification.
*/ - Configurations?: Configuration[]; + Properties?: { [key: string]: string }; } export namespace Configuration { @@ -3839,34 +4759,46 @@ export namespace Configuration { } /** - *A configuration for Amazon EMR block public access. When BlockPublicSecurityGroupRules
is set to true
, Amazon EMR prevents cluster creation if one of the cluster's security groups has a rule that allows inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges
.
A configuration for Amazon EMR block public access. When
+ * BlockPublicSecurityGroupRules
is set to true
, Amazon EMR
+ * prevents cluster creation if one of the cluster's security groups has a rule that allows
+ * inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an
+ * exception using PermittedPublicSecurityGroupRuleRanges
.
A list of additional configurations to apply within a configuration object.
+ *Indicates whether Amazon EMR block public access is enabled (true
) or
+ * disabled (false
). By default, the value is false
for accounts
+ * that have created EMR clusters before July 2019. For accounts created after this, the
+ * default is true
.
Indicates whether EMR block public access is enabled (true
) or disabled (false
). By default, the value is false
for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true
.
Specifies ports and port ranges that are permitted to have security group rules that
+ * allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is
+ * specified for PermittedPublicSecurityGroupRuleRanges
, Amazon EMR allows
+ * cluster creation if a security group associated with the cluster has a rule that allows
+ * inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 port ::/0 as the source.
By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in
+ * the list of PermittedPublicSecurityGroupRuleRanges
.
A set of properties specified within a configuration classification.
+ *The classification within a configuration.
*/ - Properties?: { [key: string]: string }; + Classification?: string; /** - *Specifies ports and port ranges that are permitted to have security group rules that allow inbound traffic from all public sources. For example, if Port 23 (Telnet) is specified for PermittedPublicSecurityGroupRuleRanges
, Amazon EMR allows cluster creation if a security group associated with the cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 or IPv6 port ::/0 as the source.
By default, Port 22, which is used for SSH access to the cluster EC2 instances, is in the list of PermittedPublicSecurityGroupRuleRanges
.
A list of additional configurations to apply within a configuration object.
*/ - PermittedPublicSecurityGroupRuleRanges?: PortRange[]; + Configurations?: Configuration[]; /** - *The classification within a configuration.
+ *A set of properties specified within a configuration classification.
*/ - Classification?: string; + Properties?: { [key: string]: string }; } export namespace BlockPublicAccessConfiguration { @@ -3880,159 +4812,202 @@ export namespace BlockPublicAccessConfiguration { */ export interface Cluster { /** - *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
- *The instance group configuration of the cluster. A value of INSTANCE_GROUP
indicates a uniform instance group configuration. A value of INSTANCE_FLEET
indicates an instance fleets configuration.
The unique identifier for the cluster.
*/ - InstanceCollectionType?: InstanceCollectionType | string; + Id?: string; /** - *The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x
, where x.x.x is an Amazon EMR release version such as emr-5.14.0
. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion
.
The name of the cluster.
*/ - ReleaseLabel?: string; + Name?: string; /** - *The Amazon Resource Name of the cluster.
+ *The current status details about the cluster.
*/ - ClusterArn?: string; + Status?: ClusterStatus; /** - *Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.
+ *Provides information about the EC2 instances in a cluster grouped by category. For + * example, key name, subnet ID, IAM instance profile, and so on.
*/ Ec2InstanceAttributes?: Ec2InstanceAttributes; /** - *The name of the cluster.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
+ *The instance group configuration of the cluster. A value of INSTANCE_GROUP
+ * indicates a uniform instance group configuration. A value of INSTANCE_FLEET
+ * indicates an instance fleets configuration.
The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.
+ *The path to the Amazon S3 location where logs for this cluster are stored.
*/ - EbsRootVolumeSize?: number; + LogUri?: string; /** - *The AMI version running on this cluster.
+ *The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is + * only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
*/ - RunningAmiVersion?: string; + LogEncryptionKmsKeyId?: string; /** - *Placement group configured for an Amazon EMR cluster.
+ *The AMI version requested for this cluster.
*/ - PlacementGroups?: PlacementGroupConfig[]; + RequestedAmiVersion?: string; /** - *Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.
+ *The AMI version running on this cluster.
*/ - TerminationProtected?: boolean; + RunningAmiVersion?: string; /** - *The way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR
indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION
indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION
is available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.
The Amazon EMR release label, which determines the version of open-source application
+ * packages installed on the cluster. Release labels are in the form emr-x.x.x
,
+ * where x.x.x is an Amazon EMR release version such as emr-5.14.0
. For more
+ * information about Amazon EMR release versions and included application versions and
+ * features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release
+ * label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use
+ * AmiVersion
.
A list of tags associated with a cluster.
+ *Specifies whether the cluster should terminate after completing all steps.
*/ - Tags?: Tag[]; + AutoTerminate?: boolean; /** - *- * The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. - *
+ *Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from + * being terminated by an API call or user intervention, or in the event of a cluster + * error.
*/ - OutpostArn?: string; + TerminationProtected?: boolean; /** - *The path to the Amazon S3 location where logs for this cluster are stored.
+ *Indicates whether the cluster is visible to all IAM users of the AWS account associated
+ * with the cluster. The default value, true
, indicates that all IAM users in the
+ * AWS account can perform cluster actions if they have the proper IAM policy permissions. If
+ * this value is false
, only the IAM user that created the cluster can perform
+ * actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of
+ * true
when you create a cluster by using the VisibleToAllUsers
+ * parameter of the RunJobFlow
action.
The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
+ *The applications installed on this cluster.
*/ - ServiceRole?: string; + Applications?: Application[]; /** - *The AMI version requested for this cluster.
+ *A list of tags associated with a cluster.
*/ - RequestedAmiVersion?: string; + Tags?: Tag[]; /** - *Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.
+ *The IAM role that will be assumed by the Amazon EMR service to access AWS resources on + * your behalf.
*/ - Configurations?: Configuration[]; + ServiceRole?: string; /** - *The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
+ *An approximation of the cost of the cluster, represented in m1.small/hours. This value + * is incremented one time for every hour an m1.small instance runs. Larger instances are + * weighted more, so an EC2 instance that is roughly four times more expensive would result in + * the normalized instance hours being incremented by four. This result is only an + * approximation and does not reflect the actual billing rate.
*/ - LogEncryptionKmsKeyId?: string; + NormalizedInstanceHours?: number; /** - *Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see Use Kerberos Authentication in the EMR Management Guide.
+ *The DNS name of the master node. If the cluster is on a private subnet, this is the + * private DNS name. On a public subnet, this is the public DNS name.
*/ - KerberosAttributes?: KerberosAttributes; + MasterPublicDnsName?: string; /** - *The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.
+ *Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied + * to the EMR cluster.
*/ - MasterPublicDnsName?: string; + Configurations?: Configuration[]; /** - *The current status details about the cluster.
+ *The name of the security configuration applied to the cluster.
*/ - Status?: ClusterStatus; + SecurityConfiguration?: string; /** - *Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI if the cluster uses a custom AMI.
+ *An IAM role for automatic scaling policies. The default role is
+ * EMR_AutoScaling_DefaultRole
. The IAM role provides permissions that the
+ * automatic scaling feature requires to launch and terminate EC2 instances in an instance
+ * group.
An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.
+ *The way that individual Amazon EC2 instances terminate when an automatic scale-in
+ * activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR
+ * indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of
+ * when the request to terminate the instance was submitted. This option is only available
+ * with Amazon EMR 5.1.0 and later and is the default for clusters created using that version.
+ * TERMINATE_AT_TASK_COMPLETION
indicates that Amazon EMR adds nodes to a deny
+ * list and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of
+ * the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes
+ * first and blocks instance termination if it could lead to HDFS corruption.
+ * TERMINATE_AT_TASK_COMPLETION
is available only in Amazon EMR version 4.1.0
+ * and later, and is the default for versions of Amazon EMR earlier than 5.1.0.
Specifies whether the cluster should terminate after completing all steps.
+ *Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon + * EBS-backed Linux AMI if the cluster uses a custom AMI.
*/ - AutoTerminate?: boolean; + CustomAmiId?: string; /** - *The applications installed on this cluster.
+ *The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for + * each EC2 instance. Available in Amazon EMR version 4.x and later.
*/ - Applications?: Application[]; + EbsRootVolumeSize?: number; /** - *Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true
, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false
, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true
when you create a cluster by using the VisibleToAllUsers
parameter of the RunJobFlow
action.
Applies only when CustomAmiID
is used. Specifies the type of updates that
+ * are applied from the Amazon Linux AMI package repositories when an instance boots using the
+ * AMI.
The name of the security configuration applied to the cluster.
+ *Attributes for Kerberos configuration when Kerberos authentication is enabled using a + * security configuration. For more information see Use Kerberos Authentication + * in the Amazon EMR Management Guide.
*/ - SecurityConfiguration?: string; + KerberosAttributes?: KerberosAttributes; /** - *Specifies the number of steps that can be executed concurrently.
+ *The Amazon Resource Name of the cluster.
*/ - StepConcurrencyLevel?: number; + ClusterArn?: string; /** - *Applies only when CustomAmiID
is used. Specifies the type of updates that are applied from the Amazon Linux AMI package repositories when an instance boots using the AMI.
The Amazon Resource Name (ARN) of the Outpost where the cluster is launched.
*/ - RepoUpgradeOnBoot?: RepoUpgradeOnBoot | string; + OutpostArn?: string; /** - *An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole
. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.
Specifies the number of steps that can be executed concurrently.
*/ - AutoScalingRole?: string; + StepConcurrencyLevel?: number; /** - *The unique identifier for the cluster.
+ *Placement group configured for an Amazon EMR cluster.
*/ - Id?: string; + PlacementGroups?: PlacementGroupConfig[]; } export namespace Cluster { @@ -4046,52 +5021,60 @@ export namespace Cluster { */ export interface InstanceGroupConfig { /** - *The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
Friendly name given to the instance group.
*/ - BidPrice?: string; + Name?: string; /** - *An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.
+ *Market type of the EC2 instances used to create a cluster node.
*/ - AutoScalingPolicy?: AutoScalingPolicy; + Market?: MarketType | string; /** - *Amazon EMR releases 4.x or later.
- *The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).
+ *The role of the instance group in the cluster.
*/ - Configurations?: Configuration[]; + InstanceRole: InstanceRoleType | string | undefined; /** - *The role of the instance group in the cluster.
+ *The bid price for each EC2 Spot Instance type as defined by InstanceType
.
+ * Expressed in USD. If neither BidPrice
nor
+ * BidPriceAsPercentageOfOnDemandPrice
is provided,
+ * BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
Market type of the EC2 instances used to create a cluster node.
+ *The EC2 instance type for all instances in the instance group.
*/ - Market?: MarketType | string; + InstanceType: string | undefined; /** - *Friendly name given to the instance group.
+ *Target number of instances for the instance group.
*/ - Name?: string; + InstanceCount: number | undefined; /** - *The EC2 instance type for all instances in the instance group.
+ *Amazon EMR releases 4.x or later.
+ *The list of configurations supplied for an EMR cluster instance group. You can specify a + * separate configuration for each instance group (master, core, and task).
*/ - InstanceType: string | undefined; + Configurations?: Configuration[]; /** - *EBS configurations that will be attached to each EC2 instance in the instance group.
+ *EBS configurations that will be attached to each EC2 instance in the instance + * group.
*/ EbsConfiguration?: EbsConfiguration; /** - *Target number of instances for the instance group.
+ *An automatic scaling policy for a core instance group or task instance group in an + * Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically + * adds and terminates EC2 instances in response to the value of a CloudWatch metric. See + * PutAutoScalingPolicy.
*/ - InstanceCount: number | undefined; + AutoScalingPolicy?: AutoScalingPolicy; } export namespace InstanceGroupConfig { @@ -4105,19 +5088,20 @@ export namespace InstanceGroupConfig { */ export interface InstanceGroupModifyConfig { /** - *A list of new or modified configurations to apply for an instance group.
+ *Unique ID of the instance group to modify.
*/ - Configurations?: Configuration[]; + InstanceGroupId: string | undefined; /** - *The EC2 InstanceIds to terminate. After you terminate the instances, the instance group will not return to its original requested size.
+ *Target size for the instance group.
*/ - EC2InstanceIdsToTerminate?: string[]; + InstanceCount?: number; /** - *Unique ID of the instance group to expand or shrink.
+ *The EC2 InstanceIds to terminate. After you terminate the instances, the instance group + * will not return to its original requested size.
*/ - InstanceGroupId: string | undefined; + EC2InstanceIdsToTerminate?: string[]; /** *Policy for customizing shrink operations.
@@ -4125,9 +5109,9 @@ export interface InstanceGroupModifyConfig { ShrinkPolicy?: ShrinkPolicy; /** - *Target size for the instance group.
+ *A list of new or modified configurations to apply for an instance group.
*/ - InstanceCount?: number; + Configurations?: Configuration[]; } export namespace InstanceGroupModifyConfig { @@ -4137,45 +5121,56 @@ export namespace InstanceGroupModifyConfig { } /** - *An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. There can be a maximum of 5 instance type configurations in a fleet.
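A minimal sketch of the InstanceGroupModifyConfig shape above in use, resizing a task group with ModifyInstanceGroupsCommand. The ClusterId and InstanceGroupId values are placeholders, and the input wrapper (a ClusterId plus an InstanceGroups array) is assumed from the wider client-emr surface rather than shown in this hunk:

```ts
import { EMRClient, ModifyInstanceGroupsCommand } from "@aws-sdk/client-emr";

const client = new EMRClient({ region: "us-east-1" });

async function resizeTaskGroup(): Promise<void> {
  await client.send(
    new ModifyInstanceGroupsCommand({
      ClusterId: "j-XXXXXXXXXXXXX", // placeholder cluster ID
      InstanceGroups: [
        {
          InstanceGroupId: "ig-XXXXXXXXXXXXX", // placeholder: the group to modify
          InstanceCount: 10,                   // new target size for the group
        },
      ],
    })
  );
}
```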
+ *An instance type configuration for each instance type in an instance fleet, which + * determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot + * target capacities. There can be a maximum of five instance type configurations in a + * fleet.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*An EC2 instance type, such as m3.xlarge
.
- *
An EC2 instance type, such as m3.xlarge
.
A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.
+ *The number of units that a provisioned instance of this type provides toward fulfilling + * the target capacities defined in InstanceFleetConfig. This value is 1 for + * a master instance fleet, and must be 1 or greater for core and task instance fleets. + * Defaults to 1 if not specified.
*/ - Configurations?: Configuration[]; + WeightedCapacity?: number; /** - *The configuration of Amazon Elastic Block Storage (EBS) attached to each instance as defined by InstanceType
.
- *
The bid price for each EC2 Spot Instance type as defined by InstanceType
.
+ * Expressed in USD. If neither BidPrice
nor
+ * BidPriceAsPercentageOfOnDemandPrice
is provided,
+ * BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified. - *
+ *The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined
+ * by InstanceType
. Expressed as a number (for example, 20 specifies 20%). If
+ * neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is
+ * provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
- *
The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance
+ * as defined by InstanceType
.
The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType
. Expressed as a number (for example, 20 specifies 20%). If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
A configuration classification that applies when provisioning cluster instances, which + * can include configurations for applications and software that run on the cluster.
*/ - BidPriceAsPercentageOfOnDemandPrice?: number; + Configurations?: Configuration[]; } export namespace InstanceTypeConfig { @@ -4187,44 +5182,53 @@ export namespace InstanceTypeConfig { /** *The configuration specification for each instance type in an instance fleet.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in USD.
The EC2 instance type, for example m3.xlarge
.
Evaluates to TRUE
when the specified InstanceType
is EBS-optimized.
The number of units that a provisioned instance of this type provides toward fulfilling + * the target capacities defined in InstanceFleetConfig. Capacity values + * represent performance characteristics such as vCPUs, memory, or I/O. If not specified, the + * default value is 1.
*/ - EbsOptimized?: boolean; + WeightedCapacity?: number; /** - *The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType
. Expressed as a number (for example, 20 specifies 20%).
The bid price for each EC2 Spot Instance type as defined by InstanceType
.
+ * Expressed in USD.
A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR.
+ *The bid price, as a percentage of On-Demand price, for each EC2 Spot Instance as defined
+ * by InstanceType
. Expressed as a number (for example, 20 specifies 20%).
The configuration of Amazon Elastic Block Storage (EBS) attached to each instance as defined by InstanceType
.
A configuration classification that applies when provisioning cluster instances, which + * can include configurations for applications and software bundled with Amazon EMR.
*/ - EbsBlockDevices?: EbsBlockDevice[]; + Configurations?: Configuration[]; /** - *The EC2 instance type, for example m3.xlarge
.
The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance
+ * as defined by InstanceType
.
The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig. Capacity values represent performance characteristics such as vCPUs, memory, or I/O. If not specified, the default value is 1.
+ *Evaluates to TRUE
when the specified InstanceType
is
+ * EBS-optimized.
Properties that describe the AWS principal that created the BlockPublicAccessConfiguration
using the PutBlockPublicAccessConfiguration
action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.
A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges
in the BlockPublicAccessConfiguration
. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating the block public access configuration to remove the exception.
A configuration for Amazon EMR block public access. The configuration applies to all
+ * clusters created in your account for the current Region. The configuration specifies
+ * whether block public access is enabled. If block public access is enabled, security groups
+ * associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or
+ * ::/0 on a port, unless the port is specified as an exception using
+ * PermittedPublicSecurityGroupRuleRanges
in the
+ * BlockPublicAccessConfiguration
. By default, Port 22 (SSH) is an exception,
+ * and public access is allowed on this port. You can change this by updating the block public
+ * access configuration to remove the exception.
For accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an EMR cluster in a Region before this date, block public access is enabled by default in that Region.
+ *For accounts that created clusters in a Region before November 25, 2019, block public + * access is disabled by default in that Region. To use this feature, you must manually + * enable and configure it. For accounts that did not create an EMR cluster in a Region + * before this date, block public access is enabled by default in that Region.
*Properties that describe the AWS principal that created the
+ * BlockPublicAccessConfiguration
using the
+ * PutBlockPublicAccessConfiguration
action as well as the date and time that
+ * the configuration was created. Each time a configuration for block public access is
+ * updated, Amazon EMR updates this metadata.
A configuration for Amazon EMR block public access. The configuration applies to all clusters created in your account for the current Region. The configuration specifies whether block public access is enabled. If block public access is enabled, security groups associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges
in the BlockPublicAccessConfiguration
. By default, Port 22 (SSH) is an exception, and public access is allowed on this port. You can change this by updating BlockPublicSecurityGroupRules
to remove the exception.
A configuration for Amazon EMR block public access. The configuration applies to all
+ * clusters created in your account for the current Region. The configuration specifies
+ * whether block public access is enabled. If block public access is enabled, security groups
+ * associated with the cluster cannot have rules that allow inbound traffic from 0.0.0.0/0 or
+ * ::/0 on a port, unless the port is specified as an exception using
+ * PermittedPublicSecurityGroupRuleRanges
in the
+ * BlockPublicAccessConfiguration
. By default, Port 22 (SSH) is an exception,
+ * and public access is allowed on this port. You can change this by updating
+ * BlockPublicSecurityGroupRules
to remove the exception.
For accounts that created clusters in a Region before November 25, 2019, block public access is disabled by default in that Region. To use this feature, you must manually enable and configure it. For accounts that did not create an EMR cluster in a Region before this date, block public access is enabled by default in that Region.
+ *For accounts that created clusters in a Region before November 25, 2019, block public + * access is disabled by default in that Region. To use this feature, you must manually + * enable and configure it. For accounts that did not create an EMR cluster in a Region + * before this date, block public access is enabled by default in that Region.
*Job flow in which to add the instance groups.
+ *Instance groups to add.
*/ - JobFlowId: string | undefined; + InstanceGroups: InstanceGroupConfig[] | undefined; /** - *Instance groups to add.
+ *Job flow in which to add the instance groups.
*/ - InstanceGroups: InstanceGroupConfig[] | undefined; + JobFlowId: string | undefined; } export namespace AddInstanceGroupsInput { @@ -4308,45 +5338,28 @@ export namespace AddInstanceGroupsInput { } /** - *Describes an instance fleet, which is a group of EC2 instances that host a particular node type (master, core, or task) in an Amazon EMR cluster. Instance fleets can consist of a mix of instance types and On-Demand and Spot instances, which are provisioned to meet a defined target capacity. - *
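A minimal sketch of AddInstanceGroupsInput in use, adding a Spot task group to a running cluster; the job flow ID and bid price are placeholders:

```ts
import { EMRClient, AddInstanceGroupsCommand } from "@aws-sdk/client-emr";

const client = new EMRClient({ region: "us-east-1" });

async function addSpotTaskGroup(): Promise<void> {
  await client.send(
    new AddInstanceGroupsCommand({
      JobFlowId: "j-XXXXXXXXXXXXX", // placeholder cluster (job flow) ID
      InstanceGroups: [
        {
          Name: "spot-task-workers",
          InstanceRole: "TASK",
          Market: "SPOT",
          InstanceType: "m5.xlarge",
          InstanceCount: 4,
          BidPrice: "0.15", // USD; when omitted, the bid defaults to 100% of On-Demand
        },
      ],
    })
  );
}
```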
+ *Describes an instance fleet, which is a group of EC2 instances that host a particular + * node type (master, core, or task) in an Amazon EMR cluster. Instance fleets can consist of + * a mix of instance types and On-Demand and Spot Instances, which are provisioned to meet a + * defined target capacity.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*The specification for the instance types that comprise an instance fleet. Up to five unique instance specifications may be defined for each instance fleet. - *
- */ - InstanceTypeSpecifications?: InstanceTypeSpecification[]; - - /** - *A friendly name for the instance fleet.
- */ - Name?: string; - - /** - *The number of On-Demand units that have been provisioned for the instance fleet to fulfill TargetOnDemandCapacity
. This provisioned capacity might be less than or greater than TargetOnDemandCapacity
.
The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity
. When an On-Demand instance is provisioned, the WeightedCapacity
units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity
of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedOnDemandCapacity to determine the Spot capacity units that have been provisioned for the instance fleet.
If not specified or set to 0, only Spot instances are provisioned for the instance fleet using TargetSpotCapacity
. At least one of TargetSpotCapacity
and TargetOnDemandCapacity
should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity
and TargetOnDemandCapacity
can be specified, and its value must be 1.
The unique identifier of the instance fleet.
*/ - TargetOnDemandCapacity?: number; + Id?: string; /** - *The unique identifier of the instance fleet.
+ *A friendly name for the instance fleet.
*/ - Id?: string; + Name?: string; /** - *The current status of the instance fleet. - *
+ *The current status of the instance fleet.
*/ Status?: InstanceFleetStatus; @@ -4357,23 +5370,74 @@ export interface InstanceFleet { InstanceFleetType?: InstanceFleetType | string; /** - *Describes the launch specification for an instance fleet. - *
+ *The target capacity of On-Demand units for the instance fleet, which determines how many
+ * On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to
+ * provision On-Demand Instances as specified by InstanceTypeConfig. Each
+ * instance configuration has a specified WeightedCapacity
. When an On-Demand
+ * Instance is provisioned, the WeightedCapacity
units count toward the target
+ * capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled,
+ * even if this results in an overage. For example, if there are 2 units remaining to fulfill
+ * capacity, and Amazon EMR can only provision an instance with a
+ * WeightedCapacity
of 5 units, the instance is provisioned, and the target
+ * capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedOnDemandCapacity to determine the Spot capacity
+ * units that have been provisioned for the instance fleet.
If not specified or set to 0, only Spot Instances are provisioned for the instance
+ * fleet using TargetSpotCapacity
. At least one of
+ * TargetSpotCapacity
and TargetOnDemandCapacity
should be
+ * greater than 0. For a master instance fleet, only one of TargetSpotCapacity
+ * and TargetOnDemandCapacity
can be specified, and its value must be
+ * 1.
The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity
. When a Spot instance is provisioned, the WeightedCapacity
units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity
of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedSpotCapacity to determine the Spot capacity units that have been provisioned for the instance fleet.
The target capacity of Spot units for the instance fleet, which determines how many Spot
+ * instances to provision. When the instance fleet launches, Amazon EMR tries to provision
+ * Spot instances as specified by InstanceTypeConfig. Each instance
+ * configuration has a specified WeightedCapacity
. When a Spot instance is
+ * provisioned, the WeightedCapacity
units count toward the target capacity.
+ * Amazon EMR provisions instances until the target capacity is totally fulfilled, even if
+ * this results in an overage. For example, if there are 2 units remaining to fulfill
+ * capacity, and Amazon EMR can only provision an instance with a
+ * WeightedCapacity
of 5 units, the instance is provisioned, and the target
+ * capacity is exceeded by 3 units. You can use InstanceFleet$ProvisionedSpotCapacity to determine the Spot capacity units
+ * that have been provisioned for the instance fleet.
If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of TargetSpotCapacity
and TargetOnDemandCapacity
should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity
and TargetOnDemandCapacity
can be specified, and its value must be 1.
If not specified or set to 0, only On-Demand instances are provisioned for the
+ * instance fleet. At least one of TargetSpotCapacity
and
+ * TargetOnDemandCapacity
should be greater than 0. For a master instance
+ * fleet, only one of TargetSpotCapacity
and
+ * TargetOnDemandCapacity
can be specified, and its value must be 1.
The number of Spot units that have been provisioned for this instance fleet to fulfill TargetSpotCapacity
. This provisioned capacity might be less than or greater than TargetSpotCapacity
.
The number of On-Demand units that have been provisioned for the instance fleet to
+ * fulfill TargetOnDemandCapacity
. This provisioned capacity might be less than
+ * or greater than TargetOnDemandCapacity
.
The number of Spot units that have been provisioned for this instance fleet to fulfill
+ * TargetSpotCapacity
. This provisioned capacity might be less than or greater
+ * than TargetSpotCapacity
.
The specification for the instance types that comprise an instance fleet. Up to five + * unique instance specifications may be defined for each instance fleet.
+ */ + InstanceTypeSpecifications?: InstanceTypeSpecification[]; + + /** + *Describes the launch specification for an instance fleet.
+ */ + LaunchSpecifications?: InstanceFleetProvisioningSpecifications; } export namespace InstanceFleet { @@ -4385,45 +5449,75 @@ export namespace InstanceFleet { /** *The configuration that defines an instance fleet.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity
. When a Spot instance is provisioned, the WeightedCapacity
units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity
of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.
If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of TargetSpotCapacity
and TargetOnDemandCapacity
should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity
and TargetOnDemandCapacity
can be specified, and its value must be 1.
The friendly name of the instance fleet.
*/ - TargetSpotCapacity?: number; + Name?: string; /** - *The instance type configurations that define the EC2 instances in the instance fleet.
+ *The node type that the instance fleet hosts. Valid values are MASTER,CORE,and + * TASK.
*/ - InstanceTypeConfigs?: InstanceTypeConfig[]; + InstanceFleetType: InstanceFleetType | string | undefined; /** - *The friendly name of the instance fleet.
+ *The target capacity of On-Demand units for the instance fleet, which determines how many
+ * On-Demand Instances to provision. When the instance fleet launches, Amazon EMR tries to
+ * provision On-Demand Instances as specified by InstanceTypeConfig. Each
+ * instance configuration has a specified WeightedCapacity
. When an On-Demand
+ * Instance is provisioned, the WeightedCapacity
units count toward the target
+ * capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled,
+ * even if this results in an overage. For example, if there are 2 units remaining to fulfill
+ * capacity, and Amazon EMR can only provision an instance with a
+ * WeightedCapacity
of 5 units, the instance is provisioned, and the target
+ * capacity is exceeded by 3 units.
If not specified or set to 0, only Spot Instances are provisioned for the instance
+ * fleet using TargetSpotCapacity
. At least one of
+ * TargetSpotCapacity
and TargetOnDemandCapacity
should be
+ * greater than 0. For a master instance fleet, only one of TargetSpotCapacity
+ * and TargetOnDemandCapacity
can be specified, and its value must be
+ * 1.
The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand instances as specified by InstanceTypeConfig. Each instance configuration has a specified WeightedCapacity
. When an On-Demand instance is provisioned, the WeightedCapacity
units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity
of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.
The target capacity of Spot units for the instance fleet, which determines how many Spot
+ * Instances to provision. When the instance fleet launches, Amazon EMR tries to provision
+ * Spot Instances as specified by InstanceTypeConfig. Each instance
+ * configuration has a specified WeightedCapacity
. When a Spot Instance is
+ * provisioned, the WeightedCapacity
units count toward the target capacity.
+ * Amazon EMR provisions instances until the target capacity is totally fulfilled, even if
+ * this results in an overage. For example, if there are 2 units remaining to fulfill
+ * capacity, and Amazon EMR can only provision an instance with a
+ * WeightedCapacity
of 5 units, the instance is provisioned, and the target
+ * capacity is exceeded by 3 units.
If not specified or set to 0, only Spot instances are provisioned for the instance fleet using TargetSpotCapacity
. At least one of TargetSpotCapacity
and TargetOnDemandCapacity
should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity
and TargetOnDemandCapacity
can be specified, and its value must be 1.
If not specified or set to 0, only On-Demand Instances are provisioned for the
+ * instance fleet. At least one of TargetSpotCapacity
and
+ * TargetOnDemandCapacity
should be greater than 0. For a master instance
+ * fleet, only one of TargetSpotCapacity
and
+ * TargetOnDemandCapacity
can be specified, and its value must be 1.
The launch specification for the instance fleet.
+ *The instance type configurations that define the EC2 instances in the instance + * fleet.
*/ - LaunchSpecifications?: InstanceFleetProvisioningSpecifications; + InstanceTypeConfigs?: InstanceTypeConfig[]; /** - *The node type that the instance fleet hosts. Valid values are MASTER,CORE,and TASK.
+ *The launch specification for the instance fleet.
*/ - InstanceFleetType: InstanceFleetType | string | undefined; + LaunchSpecifications?: InstanceFleetProvisioningSpecifications; } export namespace InstanceFleetConfig { @@ -4472,98 +5566,110 @@ export namespace AddInstanceFleetInput { } /** - *This entity represents an instance group, which is a group of instances that have common purpose. For example, CORE instance group is used for HDFS.
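To illustrate the target-capacity arithmetic documented above, a sketch that adds a Spot task fleet whose target of 8 units can be met by either of two instance types. The AddInstanceFleetInput wrapper (ClusterId plus InstanceFleet) is assumed, since that shape is not shown in this hunk, and the IDs are placeholders:

```ts
import { EMRClient, AddInstanceFleetCommand } from "@aws-sdk/client-emr";

const client = new EMRClient({ region: "us-east-1" });

async function addSpotTaskFleet(): Promise<void> {
  await client.send(
    new AddInstanceFleetCommand({
      ClusterId: "j-XXXXXXXXXXXXX", // placeholder cluster ID
      InstanceFleet: {
        Name: "spot-task-fleet",
        InstanceFleetType: "TASK",
        // Target 8 Spot units and no On-Demand units. With weights of 4 below,
        // EMR launches roughly two instances, and may overshoot the target
        // rather than leave it unfulfilled.
        TargetSpotCapacity: 8,
        TargetOnDemandCapacity: 0,
        InstanceTypeConfigs: [
          { InstanceType: "m5.xlarge", WeightedCapacity: 4 },
          { InstanceType: "m5a.xlarge", WeightedCapacity: 4 },
        ],
      },
    })
  );
}
```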
+ *This entity represents an instance group, which is a group of instances that have common + * purpose. For example, CORE instance group is used for HDFS.
*/ export interface InstanceGroup { /** - *The target number of instances for the instance group.
+ *The identifier of the instance group.
*/ - RequestedInstanceCount?: number; + Id?: string; /** - *The identifier of the instance group.
+ *The name of the instance group.
*/ - Id?: string; + Name?: string; /** - *A list of configurations that were successfully applied for an instance group last time.
+ *The marketplace to provision instances for this group. Valid values are ON_DEMAND or + * SPOT.
*/ - LastSuccessfullyAppliedConfigurations?: Configuration[]; + Market?: MarketType | string; /** - *If the instance group is EBS-optimized. An Amazon EBS-optimized instance uses an optimized configuration stack and provides additional, dedicated capacity for Amazon EBS I/O.
+ *The type of the instance group. Valid values are MASTER, CORE or TASK.
*/ - EbsOptimized?: boolean; + InstanceGroupType?: InstanceGroupType | string; /** - *The version number of the requested configuration specification for this instance group.
+ *The bid price for each EC2 Spot Instance type as defined by InstanceType
.
+ * Expressed in USD. If neither BidPrice
nor
+ * BidPriceAsPercentageOfOnDemandPrice
is provided,
+ * BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
The EBS block devices that are mapped to this instance group.
+ *The EC2 instance type for all instances in the instance group.
*/ - EbsBlockDevices?: EbsBlockDevice[]; + InstanceType?: string; /** - *Policy for customizing shrink operations.
+ *The target number of instances for the instance group.
*/ - ShrinkPolicy?: ShrinkPolicy; + RequestedInstanceCount?: number; /** - *The version number of a configuration specification that was successfully applied for an instance group last time.
+ *The number of instances currently running in this instance group.
*/ - LastSuccessfullyAppliedConfigurationsVersion?: number; + RunningInstanceCount?: number; /** - *The name of the instance group.
+ *The current status of the instance group.
*/ - Name?: string; + Status?: InstanceGroupStatus; /** - *The marketplace to provision instances for this group. Valid values are ON_DEMAND or SPOT.
+ *Amazon EMR releases 4.x or later.
+ *The list of configurations supplied for an EMR cluster instance group. You can specify a + * separate configuration for each instance group (master, core, and task).
*/ - Market?: MarketType | string; + Configurations?: Configuration[]; /** - *The bid price for each EC2 Spot instance type as defined by InstanceType
. Expressed in
- * USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided,
- * BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
The version number of the requested configuration specification for this instance + * group.
*/ - BidPrice?: string; + ConfigurationsVersion?: number; /** - *The current status of the instance group.
+ *A list of configurations that were successfully applied for an instance group last + * time.
*/ - Status?: InstanceGroupStatus; + LastSuccessfullyAppliedConfigurations?: Configuration[]; /** - *An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy.
+ *The version number of a configuration specification that was successfully applied for an + * instance group last time.
*/ - AutoScalingPolicy?: AutoScalingPolicyDescription; + LastSuccessfullyAppliedConfigurationsVersion?: number; /** - *The EC2 instance type for all instances in the instance group.
+ *The EBS block devices that are mapped to this instance group.
*/ - InstanceType?: string; + EbsBlockDevices?: EbsBlockDevice[]; /** - *Amazon EMR releases 4.x or later.
- *The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).
+ *If the instance group is EBS-optimized. An Amazon EBS-optimized instance uses an + * optimized configuration stack and provides additional, dedicated capacity for Amazon EBS + * I/O.
*/ - Configurations?: Configuration[]; + EbsOptimized?: boolean; /** - *The type of the instance group. Valid values are MASTER, CORE or TASK.
+ *Policy for customizing shrink operations.
*/ - InstanceGroupType?: InstanceGroupType | string; + ShrinkPolicy?: ShrinkPolicy; /** - *The number of instances currently running in this instance group.
+ *An automatic scaling policy for a core instance group or task instance group in an + * Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically + * adds and terminates EC2 instances in response to the value of a CloudWatch metric. See + * PutAutoScalingPolicy.
*/ - RunningInstanceCount?: number; + AutoScalingPolicy?: AutoScalingPolicyDescription; } export namespace InstanceGroup { @@ -4595,14 +5701,14 @@ export namespace ListInstanceFleetsOutput { */ export interface ListInstanceGroupsOutput { /** - *The pagination token that indicates the next set of results to retrieve.
+ *The list of instance groups for the cluster and given filters.
*/ - Marker?: string; + InstanceGroups?: InstanceGroup[]; /** - *The list of instance groups for the cluster and given filters.
+ *The pagination token that indicates the next set of results to retrieve.
*/ - InstanceGroups?: InstanceGroup[]; + Marker?: string; } export namespace ListInstanceGroupsOutput { @@ -4612,18 +5718,21 @@ export namespace ListInstanceGroupsOutput { } /** - *A description of the Amazon EC2 instance on which the cluster (job flow) runs. A valid JobFlowInstancesConfig must contain either InstanceGroups or InstanceFleets, which is the recommended configuration. They cannot be used together. You may also have MasterInstanceType, SlaveInstanceType, and InstanceCount (all three must be present), but we don't recommend this configuration.
+ *A description of the Amazon EC2 instance on which the cluster (job flow) runs. A valid + * JobFlowInstancesConfig must contain either InstanceGroups or InstanceFleets. They cannot be + * used together. You may also have MasterInstanceType, SlaveInstanceType, and InstanceCount + * (all three must be present), but we don't recommend this configuration.
*/ export interface JobFlowInstancesConfig { /** - *The Availability Zone in which the cluster runs.
+ *The EC2 instance type of the master node.
*/ - Placement?: PlacementType; + MasterInstanceType?: string; /** - *Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.
+ *The EC2 instance type of the core and task nodes.
*/ - Ec2SubnetId?: string; + SlaveInstanceType?: string; /** *The number of EC2 instances in the cluster.
@@ -4631,80 +5740,96 @@ export interface JobFlowInstancesConfig { InstanceCount?: number; /** - *The identifier of the Amazon EC2 security group for the core and task nodes.
+ *Configuration for the instance groups in a cluster.
*/ - EmrManagedSlaveSecurityGroup?: string; + InstanceGroups?: InstanceGroupConfig[]; /** - *The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
+ *Describes the EC2 instances and instance configurations for clusters that use the + * instance fleet configuration.
*/ - ServiceAccessSecurityGroup?: string; + InstanceFleets?: InstanceFleetConfig[]; /** - *The identifier of the Amazon EC2 security group for the master node.
+ *The name of the EC2 key pair that can be used to connect to the master node using SSH as + * the user called "hadoop."
*/ - EmrManagedMasterSecurityGroup?: string; + Ec2KeyName?: string; /** - *A list of additional Amazon EC2 security group IDs for the master node.
+ *The Availability Zone in which the cluster runs.
*/ - AdditionalMasterSecurityGroups?: string[]; + Placement?: PlacementType; /** - *A list of additional Amazon EC2 security group IDs for the core and task nodes.
+ *Specifies whether the cluster should remain available after completing all steps.
*/ - AdditionalSlaveSecurityGroups?: string[]; + KeepJobFlowAliveWhenNoSteps?: boolean; /** - *Configuration for the instance groups in a cluster.
+ *Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being + * terminated by API call, user intervention, or in the event of a job-flow error.
*/ - InstanceGroups?: InstanceGroupConfig[]; + TerminationProtected?: boolean; /** - *The EC2 instance type of the core and task nodes.
+ *Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the
+ * cluster. Valid inputs are "0.18" (no longer maintained), "0.20" (no longer maintained),
+ * "0.20.205" (no longer maintained), "1.0.3", "2.2.0", or "2.4.0". If you do not set this
+ * value, the default of 0.18 is used, unless the AmiVersion
parameter is set in
+ * the RunJobFlow call, in which case the default version of Hadoop for that AMI version is
+ * used.
Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are "0.18" (deprecated), "0.20" (deprecated), "0.20.205" (deprecated), "1.0.3", "2.2.0", or "2.4.0". If you do not set this value, the default of 0.18 is used, unless the AmiVersion
parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.
Applies to clusters that use the uniform instance group configuration. To launch the + * cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier + * of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this + * value and your account supports EC2-Classic, the cluster launches in EC2-Classic.
*/ - HadoopVersion?: string; + Ec2SubnetId?: string; /** - *Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.
+ *Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet + * IDs are specified, Amazon EMR evaluates them and launches instances in the optimal + * subnet.
*The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
+ *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and + * later, excluding 5.0.x versions.
*Specifies whether the cluster should remain available after completing all steps.
+ *The identifier of the Amazon EC2 security group for the master node.
*/ - KeepJobFlowAliveWhenNoSteps?: boolean; + EmrManagedMasterSecurityGroup?: string; /** - *Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.
+ *The identifier of the Amazon EC2 security group for the core and task nodes.
*/ - TerminationProtected?: boolean; + EmrManagedSlaveSecurityGroup?: string; /** - *The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
- *Describes the EC2 instances and instance configurations for clusters that use the instance fleet configuration.
+ *The identifier of the Amazon EC2 security group for the Amazon EMR service to access + * clusters in VPC private subnets.
*/ - InstanceFleets?: InstanceFleetConfig[]; + ServiceAccessSecurityGroup?: string; /** - *The name of the EC2 key pair that can be used to ssh to the master node as the user called "hadoop."
+ *A list of additional Amazon EC2 security group IDs for the master node.
*/ - Ec2KeyName?: string; + AdditionalMasterSecurityGroups?: string[]; /** - *The EC2 instance type of the master node.
+ *A list of additional Amazon EC2 security group IDs for the core and task nodes.
*/ - MasterInstanceType?: string; + AdditionalSlaveSecurityGroups?: string[]; } export namespace JobFlowInstancesConfig { @@ -4718,58 +5843,55 @@ export namespace JobFlowInstancesConfig { */ export interface RunJobFlowInput { /** - *A list of tags to associate with a cluster and propagate to Amazon EC2 instances.
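A sketch of the uniform instance group form of JobFlowInstancesConfig described above (InstanceGroups set, InstanceFleets omitted, since the two cannot be used together); the key pair and subnet are placeholders:

```ts
import { JobFlowInstancesConfig } from "@aws-sdk/client-emr";

// One master and two core nodes as uniform instance groups. Because
// InstanceGroups is set, InstanceFleets must not be supplied.
export const instances: JobFlowInstancesConfig = {
  Ec2KeyName: "my-keypair",                // placeholder EC2 key pair
  Ec2SubnetId: "subnet-0123456789abcdef0", // placeholder VPC subnet
  KeepJobFlowAliveWhenNoSteps: true,       // keep the cluster running after steps finish
  TerminationProtected: false,
  InstanceGroups: [
    { Name: "primary", InstanceRole: "MASTER", InstanceType: "m5.xlarge", InstanceCount: 1 },
    { Name: "core", InstanceRole: "CORE", InstanceType: "m5.xlarge", InstanceCount: 2 },
  ],
};
```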
+ *The name of the job flow.
*/ - Tags?: Tag[]; + Name: string | undefined; /** - *Specifies the number of steps that can be executed concurrently. The default value is 1
. The maximum value is 256
.
The location in Amazon S3 to write the log files of the job flow. If a value is not + * provided, logs are not created.
*/ - StepConcurrencyLevel?: number; + LogUri?: string; /** - *For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.
- *A list of strings that indicates third-party software to use. For more information, see the Amazon EMR Developer Guide. Currently supported values are:
- *"mapr-m3" - launch the job flow using MapR M3 Edition.
- *"mapr-m5" - launch the job flow using MapR M5 Edition.
- *The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not + * provided, the logs remain encrypted by AES-256. This attribute is only available with + * Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.
*/ - SupportedProducts?: string[]; + LogEncryptionKmsKeyId?: string; /** - *- * The specified managed scaling policy for an Amazon EMR cluster. - *
+ *A JSON string for selecting additional features.
*/ - ManagedScalingPolicy?: ManagedScalingPolicy; + AdditionalInfo?: string; /** - *Also called instance profile and EC2 role. An IAM role for an EMR cluster. The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole
. In order to use the default role, you must have already created it using the CLI or console.
Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and
+ * later, ReleaseLabel
is used. To specify a custom AMI, use
+ * CustomAmiID
.
The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x
, where x.x.x is an Amazon EMR release version such as emr-5.14.0
. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion
.
The Amazon EMR release label, which determines the version of open-source application
+ * packages installed on the cluster. Release labels are in the form emr-x.x.x
,
+ * where x.x.x is an Amazon EMR release version such as emr-5.14.0
. For more
+ * information about Amazon EMR release versions and included application versions and
+ * features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release
+ * label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use
+ * AmiVersion
.
For Amazon EMR releases 4.0 and later. The list of configurations supplied for the EMR cluster you are creating.
+ *A specification of the number and type of Amazon EC2 instances.
*/ - Configurations?: Configuration[]; + Instances: JobFlowInstancesConfig | undefined; /** - *Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when it launches cluster EC2 instances. For more information about custom AMIs in Amazon EMR, see Using a Custom AMI in the Amazon EMR Management Guide. If omitted, the cluster uses the base Linux AMI for the ReleaseLabel
specified. For Amazon EMR versions 2.x and 3.x, use AmiVersion
instead.
For information about creating a custom AMI, see Creating an Amazon EBS-Backed Linux AMI in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information about finding an AMI ID, see Finding a Linux AMI.
+ *A list of steps to run.
*/ - CustomAmiId?: string; + Steps?: StepConfig[]; /** *A list of bootstrap actions to run before Hadoop starts on the cluster nodes.
@@ -4777,30 +5899,34 @@ export interface RunJobFlowInput { BootstrapActions?: BootstrapActionConfig[]; /** - *The specified placement group configuration for an Amazon EMR cluster.
- */ - PlacementGroupConfigs?: PlacementGroupConfig[]; - - /** - *The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.
- */ - EbsRootVolumeSize?: number; - - /** - *Applies to Amazon EMR releases 4.0 and later. A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster. For a list of applications available for each Amazon EMR release version, see the Amazon EMR Release Guide.
- */ - Applications?: Application[]; - - /** - *Specifies the way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR
indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION
indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION
available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.
For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use + * Applications.
+ *A list of strings that indicates third-party software to use. For more information, see + * the Amazon EMR + * Developer Guide. Currently supported values are:
+ *"mapr-m3" - launch the job flow using MapR M3 Edition.
+ *"mapr-m5" - launch the job flow using MapR M5 Edition.
+ *For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.
+ *For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use + * Applications.
*A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see "Launch a Job Flow on the MapR Distribution for Hadoop" in the Amazon EMR Developer Guide. Supported values are:
+ *A list of strings that indicates third-party software to use with the job flow that + * accepts a user argument list. EMR accepts and forwards the argument list to the + * corresponding installation script as bootstrap action arguments. For more information, see + * "Launch a Job Flow on the MapR Distribution for Hadoop" in the Amazon EMR Developer Guide. Supported + * values are:
*"mapr-m3" - launch the cluster using MapR M3 Edition.
@@ -4809,7 +5935,8 @@ export interface RunJobFlowInput { *"mapr-m5" - launch the cluster using MapR M5 Edition.
*"mapr" with the user arguments specifying "--edition,m3" or "--edition,m5" - launch the job flow using MapR M3 or M5 Edition respectively.
+ *"mapr" with the user arguments specifying "--edition,m3" or "--edition,m5" - + * launch the job flow using MapR M3 or M5 Edition respectively.
*"mapr-m7" - launch the cluster using MapR M7 Edition.
@@ -4824,76 +5951,134 @@ export interface RunJobFlowInput { *"spark" - launch the cluster with Apache Spark installed.
*"ganglia" - launch the cluster with the Ganglia Monitoring System installed.
+ *"ganglia" - launch the cluster with the Ganglia Monitoring System + * installed.
*A value of true
indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false
indicates that only the IAM user who created the cluster can perform actions.
Applies to Amazon EMR releases 4.0 and later. A case-insensitive list of applications + * for Amazon EMR to install and configure when launching the cluster. For a list of + * applications available for each Amazon EMR release version, see the Amazon EMR Release + * Guide.
+ */ + Applications?: Application[]; + + /** + *For Amazon EMR releases 4.0 and later. The list of configurations supplied for the EMR + * cluster you are creating.
+ */ + Configurations?: Configuration[]; + + /** + *A value of true
indicates that all IAM users in the AWS account can perform
+ * cluster actions if they have the proper IAM policy permissions. This is the default. A
+ * value of false
indicates that only the IAM user who created the cluster can
+ * perform actions.
The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.
+ *Also called instance profile and EC2 role. An IAM role for an EMR cluster. The EC2
+ * instances of the cluster assume this role. The default role is
+ * EMR_EC2_DefaultRole
. In order to use the default role, you must have
+ * already created it using the CLI or console.
The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
+ *The IAM role that will be assumed by the Amazon EMR service to access AWS resources on + * your behalf.
*/ ServiceRole?: string; + /** + *A list of tags to associate with a cluster and propagate to Amazon EC2 instances.
+ */ + Tags?: Tag[]; + /** *The name of a security configuration to apply to the cluster.
*/ SecurityConfiguration?: string; /** - *An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole
. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.
An IAM role for automatic scaling policies. The default role is
+ * EMR_AutoScaling_DefaultRole
. The IAM role provides permissions that the
+ * automatic scaling feature requires to launch and terminate EC2 instances in an instance
+ * group.
Applies only when CustomAmiID
is used. Specifies which updates from the Amazon Linux AMI package repositories to apply automatically when the instance boots using the AMI. If omitted, the default is SECURITY
, which indicates that only security updates are applied. If NONE
is specified, no updates are applied, and all updates must be applied manually.
Specifies the way that individual Amazon EC2 instances terminate when an automatic
+ * scale-in activity occurs or an instance group is resized.
+ * TERMINATE_AT_INSTANCE_HOUR
indicates that Amazon EMR terminates nodes at
+ * the instance-hour boundary, regardless of when the request to terminate the instance was
+ * submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default
+ * for clusters created using that version. TERMINATE_AT_TASK_COMPLETION
+ * indicates that Amazon EMR adds nodes to a deny list and drains tasks from nodes before
+ * terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either
+ * behavior, Amazon EMR removes the least active nodes first and blocks instance termination
+ * if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION
available
+ * only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR
+ * earlier than 5.1.0.
The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not provided, the logs will remain encrypted by AES-256. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.
+ *Available only in Amazon EMR version 5.7.0 and later. The ID of a custom Amazon
+ * EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when it launches cluster EC2
+ * instances. For more information about custom AMIs in Amazon EMR, see Using a Custom
+ * AMI in the Amazon EMR Management Guide. If omitted, the
+ * cluster uses the base Linux AMI for the ReleaseLabel
specified. For Amazon EMR
+ * versions 2.x and 3.x, use AmiVersion
instead.
For information about creating a custom AMI, see Creating an Amazon EBS-Backed + * Linux AMI in the Amazon Elastic Compute Cloud User Guide for Linux + * Instances. For information about finding an AMI ID, see Finding a Linux + * AMI.
*/ - LogEncryptionKmsKeyId?: string; + CustomAmiId?: string; /** - *A list of steps to run.
+ *The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for + * each EC2 instance. Available in Amazon EMR version 4.x and later.
*/ - Steps?: StepConfig[]; + EbsRootVolumeSize?: number; /** - *Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration. For more information see Use Kerberos Authentication in the EMR Management Guide.
+ *Applies only when CustomAmiID
is used. Specifies which updates from the
+ * Amazon Linux AMI package repositories to apply automatically when the instance boots using
+ * the AMI. If omitted, the default is SECURITY
, which indicates that only
+ * security updates are applied. If NONE
is specified, no updates are applied,
+ * and all updates must be applied manually.
The name of the job flow.
+ *Attributes for Kerberos configuration when Kerberos authentication is enabled using a + * security configuration. For more information see Use Kerberos Authentication + * in the Amazon EMR Management Guide.
*/ - Name: string | undefined; + KerberosAttributes?: KerberosAttributes; /** - *Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases 4.0 and later, ReleaseLabel
is used. To specify a custom AMI, use CustomAmiID
.
Specifies the number of steps that can be executed concurrently. The default value is
+ * 1
. The maximum value is 256
.
A JSON string for selecting additional features.
+ *The specified managed scaling policy for an Amazon EMR cluster.
*/ - AdditionalInfo?: string; + ManagedScalingPolicy?: ManagedScalingPolicy; /** - *A specification of the number and type of Amazon EC2 instances.
+ *The specified placement group configuration for an Amazon EMR cluster.
*/ - Instances: JobFlowInstancesConfig | undefined; + PlacementGroupConfigs?: PlacementGroupConfig[]; } export namespace RunJobFlowInput { diff --git a/clients/client-emr/pagination/ListStudioSessionMappingsPaginator.ts b/clients/client-emr/pagination/ListStudioSessionMappingsPaginator.ts new file mode 100644 index 000000000000..74ec7b9b18f9 --- /dev/null +++ b/clients/client-emr/pagination/ListStudioSessionMappingsPaginator.ts @@ -0,0 +1,56 @@ +import { EMR } from "../EMR"; +import { EMRClient } from "../EMRClient"; +import { + ListStudioSessionMappingsCommand, + ListStudioSessionMappingsCommandInput, + ListStudioSessionMappingsCommandOutput, +} from "../commands/ListStudioSessionMappingsCommand"; +import { EMRPaginationConfiguration } from "./Interfaces"; +import { Paginator } from "@aws-sdk/types"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: EMRClient, + input: ListStudioSessionMappingsCommandInput, + ...args: any +): Promise