diff --git a/.changes/2.1262.0.json b/.changes/2.1262.0.json new file mode 100644 index 0000000000..b5cc591fd6 --- /dev/null +++ b/.changes/2.1262.0.json @@ -0,0 +1,102 @@ +[ + { + "type": "bugfix", + "category": "ResourceExplorer2", + "description": "Add dualstack by default for FIPS" + }, + { + "type": "bugfix", + "category": "Signer", + "description": "Set Authorization header correctly in Bearer Signer" + }, + { + "type": "bugfix", + "category": "Signer", + "description": "Read identity type from service.api.signatureVersion" + }, + { + "type": "feature", + "category": "Backup", + "description": "AWS Backup introduces support for legal hold and application stack backups. AWS Backup Audit Manager introduces support for cross-Region, cross-account reports." + }, + { + "type": "feature", + "category": "CloudWatch", + "description": "Adds cross-account support to the GetMetricData API. Adds cross-account support to the ListMetrics API through the usage of the IncludeLinkedAccounts flag and the new OwningAccounts field." + }, + { + "type": "feature", + "category": "CloudWatchLogs", + "description": "Updates to support CloudWatch Logs data protection and CloudWatch cross-account observability" + }, + { + "type": "feature", + "category": "Drs", + "description": "Non breaking changes to existing APIs, and additional APIs added to support in-AWS failing back using AWS Elastic Disaster Recovery." + }, + { + "type": "feature", + "category": "ECS", + "description": "This release adds support for ECS Service Connect, a new capability that simplifies writing and operating resilient distributed applications. This release updates the TaskDefinition, Cluster, Service mutation APIs with Service connect constructs and also adds a new ListServicesByNamespace API." + }, + { + "type": "feature", + "category": "EFS", + "description": "This release adds elastic as a new ThroughputMode value for EFS file systems and adds AFTER_1_DAY as a value for TransitionToIARules." + }, + { + "type": "feature", + "category": "Endpoint", + "description": "Add pattern global dualstack by default" + }, + { + "type": "feature", + "category": "IoTWireless", + "description": "This release includes a new feature for customers to calculate the position of their devices by adding three new APIs: UpdateResourcePosition, GetResourcePosition, and GetPositionEstimate." + }, + { + "type": "feature", + "category": "Iot", + "description": "Job scheduling enables the scheduled rollout of a Job with start and end times and a customizable end behavior when end time is reached. This is available for continuous and snapshot jobs. Added support for MQTT5 properties to AWS IoT TopicRule Republish Action." + }, + { + "type": "feature", + "category": "IotData", + "description": "This release adds support for MQTT5 properties to AWS IoT HTTP Publish API." + }, + { + "type": "feature", + "category": "Kendra", + "description": "Amazon Kendra now supports preview of table information from HTML tables in the search results. The most relevant cells with their corresponding rows, columns are displayed as a preview in the search result. The most relevant table cell or cells are also highlighted in table preview." + }, + { + "type": "feature", + "category": "Mgn", + "description": "This release adds support for Application and Wave management. We also now support custom post-launch actions." 
+ }, + { + "type": "feature", + "category": "OAM", + "description": "Amazon CloudWatch Observability Access Manager is a new service that allows configuration of the CloudWatch cross-account observability feature." + }, + { + "type": "feature", + "category": "Organizations", + "description": "This release introduces delegated administrator for AWS Organizations, a new feature to help you delegate the management of your Organizations policies, enabling you to govern your AWS organization in a decentralized way. You can now allow member accounts to manage Organizations policies." + }, + { + "type": "feature", + "category": "RDS", + "description": "This release enables new Aurora and RDS feature called Blue/Green Deployments that makes updates to databases safer, simpler and faster." + }, + { + "type": "feature", + "category": "Textract", + "description": "This release adds support for classifying and splitting lending documents by type, and extracting information by using the Analyze Lending APIs. This release also includes support for summarized information of the processed lending document package, in addition to per document results." + }, + { + "type": "feature", + "category": "TranscribeService", + "description": "This release adds support for 'inputType' for post-call and real-time (streaming) Call Analytics within Amazon Transcribe." + } +] \ No newline at end of file diff --git a/.changes/next-release/bugfix-ResourceExplorer2-af0b0824.json b/.changes/next-release/bugfix-ResourceExplorer2-af0b0824.json deleted file mode 100644 index d95378295f..0000000000 --- a/.changes/next-release/bugfix-ResourceExplorer2-af0b0824.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "bugfix", - "category": "ResourceExplorer2", - "description": "Add dualstack by default for FIPS" -} \ No newline at end of file diff --git a/.changes/next-release/bugfix-Signer-19c64295.json b/.changes/next-release/bugfix-Signer-19c64295.json deleted file mode 100644 index 34263b7338..0000000000 --- a/.changes/next-release/bugfix-Signer-19c64295.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "bugfix", - "category": "Signer", - "description": "Set Authorization header correctly in Bearer Signer" -} \ No newline at end of file diff --git a/.changes/next-release/bugfix-Signer-57f3328c.json b/.changes/next-release/bugfix-Signer-57f3328c.json deleted file mode 100644 index 6e27d7f0df..0000000000 --- a/.changes/next-release/bugfix-Signer-57f3328c.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "bugfix", - "category": "Signer", - "description": "Read identity type from service.api.signatureVersion" -} \ No newline at end of file diff --git a/.changes/next-release/feature-Endpoint-cbd3413f.json b/.changes/next-release/feature-Endpoint-cbd3413f.json deleted file mode 100644 index 176ced7b03..0000000000 --- a/.changes/next-release/feature-Endpoint-cbd3413f.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "feature", - "category": "Endpoint", - "description": "Add pattern global dualstack by default" -} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 230ac92083..34a1c1d42f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,29 @@ # Changelog for AWS SDK for JavaScript - + +## 2.1262.0 +* bugfix: ResourceExplorer2: Add dualstack by default for FIPS +* bugfix: Signer: Set Authorization header correctly in Bearer Signer +* bugfix: Signer: Read identity type from service.api.signatureVersion +* feature: Backup: AWS Backup introduces support for legal hold and application stack backups. 
AWS Backup Audit Manager introduces support for cross-Region, cross-account reports. +* feature: CloudWatch: Adds cross-account support to the GetMetricData API. Adds cross-account support to the ListMetrics API through the usage of the IncludeLinkedAccounts flag and the new OwningAccounts field. +* feature: CloudWatchLogs: Updates to support CloudWatch Logs data protection and CloudWatch cross-account observability +* feature: Drs: Non breaking changes to existing APIs, and additional APIs added to support in-AWS failing back using AWS Elastic Disaster Recovery. +* feature: ECS: This release adds support for ECS Service Connect, a new capability that simplifies writing and operating resilient distributed applications. This release updates the TaskDefinition, Cluster, Service mutation APIs with Service connect constructs and also adds a new ListServicesByNamespace API. +* feature: EFS: This release adds elastic as a new ThroughputMode value for EFS file systems and adds AFTER_1_DAY as a value for TransitionToIARules. +* feature: Endpoint: Add pattern global dualstack by default +* feature: IoTWireless: This release includes a new feature for customers to calculate the position of their devices by adding three new APIs: UpdateResourcePosition, GetResourcePosition, and GetPositionEstimate. +* feature: Iot: Job scheduling enables the scheduled rollout of a Job with start and end times and a customizable end behavior when end time is reached. This is available for continuous and snapshot jobs. Added support for MQTT5 properties to AWS IoT TopicRule Republish Action. +* feature: IotData: This release adds support for MQTT5 properties to AWS IoT HTTP Publish API. +* feature: Kendra: Amazon Kendra now supports preview of table information from HTML tables in the search results. The most relevant cells with their corresponding rows, columns are displayed as a preview in the search result. The most relevant table cell or cells are also highlighted in table preview. +* feature: Mgn: This release adds support for Application and Wave management. We also now support custom post-launch actions. +* feature: OAM: Amazon CloudWatch Observability Access Manager is a new service that allows configuration of the CloudWatch cross-account observability feature. +* feature: Organizations: This release introduces delegated administrator for AWS Organizations, a new feature to help you delegate the management of your Organizations policies, enabling you to govern your AWS organization in a decentralized way. You can now allow member accounts to manage Organizations policies. +* feature: RDS: This release enables new Aurora and RDS feature called Blue/Green Deployments that makes updates to databases safer, simpler and faster. +* feature: Textract: This release adds support for classifying and splitting lending documents by type, and extracting information by using the Analyze Lending APIs. This release also includes support for summarized information of the processed lending document package, in addition to per document results. +* feature: TranscribeService: This release adds support for 'inputType' for post-call and real-time (streaming) Call Analytics within Amazon Transcribe. + ## 2.1261.0 * feature: Grafana: This release includes support for configuring a Grafana workspace to connect to a datasource within a VPC as well as new APIs for configuring Grafana settings. 
* feature: Rbin: This release adds support for Rule Lock for Recycle Bin, which allows you to lock retention rules so that they can no longer be modified or deleted. diff --git a/README.md b/README.md index f31383ef3d..cdb32604f2 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ for further details. To use the SDK in the browser, simply add the following script tag to your HTML pages: - + You can also build a custom browser SDK with your specified set of AWS services. This can allow you to reduce the SDK's size, specify different API versions of diff --git a/apis/backup-2018-11-15.min.json b/apis/backup-2018-11-15.min.json index 664281c4ea..fd11d12228 100644 --- a/apis/backup-2018-11-15.min.json +++ b/apis/backup-2018-11-15.min.json @@ -11,6 +11,40 @@ "uid": "backup-2018-11-15" }, "operations": { + "CancelLegalHold": { + "http": { + "method": "DELETE", + "requestUri": "/legal-holds/{legalHoldId}", + "responseCode": 201 + }, + "input": { + "type": "structure", + "required": [ + "LegalHoldId", + "CancelDescription" + ], + "members": { + "LegalHoldId": { + "location": "uri", + "locationName": "legalHoldId" + }, + "CancelDescription": { + "location": "querystring", + "locationName": "cancelDescription" + }, + "RetainRecordInDays": { + "location": "querystring", + "locationName": "retainRecordInDays", + "type": "long" + } + } + }, + "output": { + "type": "structure", + "members": {} + }, + "idempotent": true + }, "CreateBackupPlan": { "http": { "method": "PUT", @@ -23,10 +57,10 @@ ], "members": { "BackupPlan": { - "shape": "S2" + "shape": "S6" }, "BackupPlanTags": { - "shape": "Sc" + "shape": "Sf" }, "CreatorRequestId": {} } @@ -41,7 +75,7 @@ }, "VersionId": {}, "AdvancedBackupSettings": { - "shape": "Sj" + "shape": "Sm" } } }, @@ -64,7 +98,7 @@ "locationName": "backupPlanId" }, "BackupSelection": { - "shape": "St" + "shape": "Sv" }, "CreatorRequestId": {} } @@ -97,7 +131,7 @@ "locationName": "backupVaultName" }, "BackupVaultTags": { - "shape": "Sc" + "shape": "Sf" }, "EncryptionKeyArn": {}, "CreatorRequestId": {} @@ -129,13 +163,13 @@ "FrameworkName": {}, "FrameworkDescription": {}, "FrameworkControls": { - "shape": "S1b" + "shape": "S1d" }, "IdempotencyToken": { "idempotencyToken": true }, "FrameworkTags": { - "shape": "S1l" + "shape": "S1n" } } }, @@ -148,6 +182,46 @@ }, "idempotent": true }, + "CreateLegalHold": { + "http": { + "requestUri": "/legal-holds/" + }, + "input": { + "type": "structure", + "required": [ + "Title", + "Description" + ], + "members": { + "Title": {}, + "Description": {}, + "IdempotencyToken": {}, + "RecoveryPointSelection": { + "shape": "S1q" + }, + "Tags": { + "shape": "Sf" + } + } + }, + "output": { + "type": "structure", + "members": { + "Title": {}, + "Status": {}, + "Description": {}, + "LegalHoldId": {}, + "LegalHoldArn": {}, + "CreationDate": { + "type": "timestamp" + }, + "RecoveryPointSelection": { + "shape": "S1q" + } + } + }, + "idempotent": true + }, "CreateReportPlan": { "http": { "requestUri": "/audit/report-plans" @@ -163,13 +237,13 @@ "ReportPlanName": {}, "ReportPlanDescription": {}, "ReportDeliveryChannel": { - "shape": "S1q" + "shape": "S1z" }, "ReportSetting": { - "shape": "S1s" + "shape": "S21" }, "ReportPlanTags": { - "shape": "S1l" + "shape": "S1n" }, "IdempotencyToken": { "idempotencyToken": true @@ -416,7 +490,7 @@ }, "IamRoleArn": {}, "CreatedBy": { - "shape": "S2a" + "shape": "S2j" }, "ResourceType": {}, "BytesTransferred": { @@ -429,9 +503,23 @@ "type": "timestamp" }, "BackupOptions": { - "shape": "Sm" + "shape": "Sp" }, - 
"BackupType": {} + "BackupType": {}, + "ParentJobId": {}, + "IsParent": { + "type": "boolean" + }, + "NumberOfChildJobs": { + "type": "long" + }, + "ChildJobsInState": { + "type": "map", + "key": {}, + "value": { + "type": "long" + } + } } }, "idempotent": true @@ -503,7 +591,7 @@ "type": "structure", "members": { "CopyJob": { - "shape": "S2g" + "shape": "S2r" } } }, @@ -533,7 +621,7 @@ "FrameworkArn": {}, "FrameworkDescription": {}, "FrameworkControls": { - "shape": "S1b" + "shape": "S1d" }, "CreationTime": { "type": "timestamp" @@ -557,7 +645,7 @@ "type": "structure", "members": { "GlobalSettings": { - "shape": "S2m" + "shape": "S2y" }, "LastUpdateTime": { "type": "timestamp" @@ -626,7 +714,7 @@ "ResourceArn": {}, "ResourceType": {}, "CreatedBy": { - "shape": "S2a" + "shape": "S2j" }, "IamRoleArn": {}, "Status": {}, @@ -641,10 +729,10 @@ "type": "long" }, "CalculatedLifecycle": { - "shape": "S2u" + "shape": "S36" }, "Lifecycle": { - "shape": "Sa" + "shape": "Se" }, "EncryptionKeyArn": {}, "IsEncrypted": { @@ -653,6 +741,11 @@ "StorageClass": {}, "LastRestoreTime": { "type": "timestamp" + }, + "ParentRecoveryPointArn": {}, + "CompositeMemberIdentifier": {}, + "IsParent": { + "type": "boolean" } } }, @@ -671,10 +764,10 @@ "type": "structure", "members": { "ResourceTypeOptInPreference": { - "shape": "S2z" + "shape": "S3a" }, "ResourceTypeManagementPreference": { - "shape": "S31" + "shape": "S3c" } } } @@ -700,7 +793,7 @@ "type": "structure", "members": { "ReportJob": { - "shape": "S35" + "shape": "S3g" } } } @@ -726,7 +819,7 @@ "type": "structure", "members": { "ReportPlan": { - "shape": "S39" + "shape": "S3k" } } } @@ -798,6 +891,30 @@ } } }, + "DisassociateRecoveryPointFromParent": { + "http": { + "method": "DELETE", + "requestUri": "/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}/parentAssociation", + "responseCode": 204 + }, + "input": { + "type": "structure", + "required": [ + "BackupVaultName", + "RecoveryPointArn" + ], + "members": { + "BackupVaultName": { + "location": "uri", + "locationName": "backupVaultName" + }, + "RecoveryPointArn": { + "location": "uri", + "locationName": "recoveryPointArn" + } + } + } + }, "ExportBackupPlanTemplate": { "http": { "method": "GET", @@ -847,7 +964,7 @@ "type": "structure", "members": { "BackupPlan": { - "shape": "S3j" + "shape": "S3v" }, "BackupPlanId": {}, "BackupPlanArn": {}, @@ -863,7 +980,7 @@ "type": "timestamp" }, "AdvancedBackupSettings": { - "shape": "Sj" + "shape": "Sm" } } }, @@ -886,7 +1003,7 @@ "type": "structure", "members": { "BackupPlan": { - "shape": "S3j" + "shape": "S3v" } } } @@ -912,7 +1029,7 @@ "type": "structure", "members": { "BackupPlanDocument": { - "shape": "S3j" + "shape": "S3v" } } } @@ -943,7 +1060,7 @@ "type": "structure", "members": { "BackupSelection": { - "shape": "St" + "shape": "Sv" }, "SelectionId": {}, "BackupPlanId": {}, @@ -1006,7 +1123,49 @@ "BackupVaultArn": {}, "SNSTopicArn": {}, "BackupVaultEvents": { - "shape": "S3x" + "shape": "S49" + } + } + }, + "idempotent": true + }, + "GetLegalHold": { + "http": { + "method": "GET", + "requestUri": "/legal-holds/{legalHoldId}/" + }, + "input": { + "type": "structure", + "required": [ + "LegalHoldId" + ], + "members": { + "LegalHoldId": { + "location": "uri", + "locationName": "legalHoldId" + } + } + }, + "output": { + "type": "structure", + "members": { + "Title": {}, + "Status": {}, + "Description": {}, + "CancelDescription": {}, + "LegalHoldId": {}, + "LegalHoldArn": {}, + "CreationDate": { + "type": "timestamp" + }, + "CancellationDate": { + 
"type": "timestamp" + }, + "RetainRecordUntil": { + "type": "timestamp" + }, + "RecoveryPointSelection": { + "shape": "S1q" } } }, @@ -1040,7 +1199,7 @@ "BackupVaultArn": {}, "RecoveryPointArn": {}, "RestoreMetadata": { - "shape": "S41" + "shape": "S4f" } } }, @@ -1117,6 +1276,10 @@ "location": "querystring", "locationName": "completeBefore", "type": "timestamp" + }, + "ByParentJobId": { + "location": "querystring", + "locationName": "parentJobId" } } }, @@ -1148,7 +1311,7 @@ }, "IamRoleArn": {}, "CreatedBy": { - "shape": "S2a" + "shape": "S2j" }, "ExpectedCompletionDate": { "type": "timestamp" @@ -1161,9 +1324,13 @@ "type": "long" }, "BackupOptions": { - "shape": "Sm" + "shape": "Sp" }, - "BackupType": {} + "BackupType": {}, + "ParentJobId": {}, + "IsParent": { + "type": "boolean" + } } } }, @@ -1241,7 +1408,7 @@ "BackupPlanVersionsList": { "type": "list", "member": { - "shape": "S4i" + "shape": "S4w" } } } @@ -1279,7 +1446,7 @@ "BackupPlansList": { "type": "list", "member": { - "shape": "S4i" + "shape": "S4w" } } } @@ -1449,6 +1616,10 @@ "location": "querystring", "locationName": "completeAfter", "type": "timestamp" + }, + "ByParentJobId": { + "location": "querystring", + "locationName": "parentJobId" } } }, @@ -1458,7 +1629,7 @@ "CopyJobs": { "type": "list", "member": { - "shape": "S2g" + "shape": "S2r" } }, "NextToken": {} @@ -1509,6 +1680,52 @@ } } }, + "ListLegalHolds": { + "http": { + "method": "GET", + "requestUri": "/legal-holds/" + }, + "input": { + "type": "structure", + "members": { + "NextToken": { + "location": "querystring", + "locationName": "nextToken" + }, + "MaxResults": { + "location": "querystring", + "locationName": "maxResults", + "type": "integer" + } + } + }, + "output": { + "type": "structure", + "members": { + "NextToken": {}, + "LegalHolds": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Title": {}, + "Status": {}, + "Description": {}, + "LegalHoldId": {}, + "LegalHoldArn": {}, + "CreationDate": { + "type": "timestamp" + }, + "CancellationDate": { + "type": "timestamp" + } + } + } + } + } + }, + "idempotent": true + }, "ListProtectedResources": { "http": { "method": "GET", @@ -1594,6 +1811,10 @@ "location": "querystring", "locationName": "createdAfter", "type": "timestamp" + }, + "ByParentRecoveryPointArn": { + "location": "querystring", + "locationName": "parentRecoveryPointArn" } } }, @@ -1613,7 +1834,7 @@ "ResourceArn": {}, "ResourceType": {}, "CreatedBy": { - "shape": "S2a" + "shape": "S2j" }, "IamRoleArn": {}, "Status": {}, @@ -1628,10 +1849,10 @@ "type": "long" }, "CalculatedLifecycle": { - "shape": "S2u" + "shape": "S36" }, "Lifecycle": { - "shape": "Sa" + "shape": "Se" }, "EncryptionKeyArn": {}, "IsEncrypted": { @@ -1639,6 +1860,11 @@ }, "LastRestoreTime": { "type": "timestamp" + }, + "ParentRecoveryPointArn": {}, + "CompositeMemberIdentifier": {}, + "IsParent": { + "type": "boolean" } } } @@ -1647,6 +1873,49 @@ }, "idempotent": true }, + "ListRecoveryPointsByLegalHold": { + "http": { + "method": "GET", + "requestUri": "/legal-holds/{legalHoldId}/recovery-points" + }, + "input": { + "type": "structure", + "required": [ + "LegalHoldId" + ], + "members": { + "LegalHoldId": { + "location": "uri", + "locationName": "legalHoldId" + }, + "NextToken": { + "location": "querystring", + "locationName": "nextToken" + }, + "MaxResults": { + "location": "querystring", + "locationName": "maxResults", + "type": "integer" + } + } + }, + "output": { + "type": "structure", + "members": { + "RecoveryPoints": { + "type": "list", + "member": { + 
"type": "structure", + "members": { + "RecoveryPointArn": {} + } + } + }, + "NextToken": {} + } + }, + "idempotent": true + }, "ListRecoveryPointsByResource": { "http": { "method": "GET", @@ -1692,7 +1961,11 @@ "BackupSizeBytes": { "type": "long" }, - "BackupVaultName": {} + "BackupVaultName": {}, + "IsParent": { + "type": "boolean" + }, + "ParentRecoveryPointArn": {} } } } @@ -1743,7 +2016,7 @@ "ReportJobs": { "type": "list", "member": { - "shape": "S35" + "shape": "S3g" } }, "NextToken": {} @@ -1775,7 +2048,7 @@ "ReportPlans": { "type": "list", "member": { - "shape": "S39" + "shape": "S3k" } }, "NextToken": {} @@ -1897,7 +2170,7 @@ "members": { "NextToken": {}, "Tags": { - "shape": "Sc" + "shape": "Sf" } } }, @@ -1970,7 +2243,7 @@ }, "SNSTopicArn": {}, "BackupVaultEvents": { - "shape": "S3x" + "shape": "S49" } } }, @@ -2000,13 +2273,13 @@ "type": "long" }, "Lifecycle": { - "shape": "Sa" + "shape": "Se" }, "RecoveryPointTags": { - "shape": "Sc" + "shape": "Sf" }, "BackupOptions": { - "shape": "Sm" + "shape": "Sp" } } }, @@ -2017,6 +2290,9 @@ "RecoveryPointArn": {}, "CreationDate": { "type": "timestamp" + }, + "IsParent": { + "type": "boolean" } } }, @@ -2042,7 +2318,7 @@ "IamRoleArn": {}, "IdempotencyToken": {}, "Lifecycle": { - "shape": "Sa" + "shape": "Se" } } }, @@ -2052,6 +2328,9 @@ "CopyJobId": {}, "CreationDate": { "type": "timestamp" + }, + "IsParent": { + "type": "boolean" } } }, @@ -2098,7 +2377,7 @@ "members": { "RecoveryPointArn": {}, "Metadata": { - "shape": "S41" + "shape": "S4f" }, "IamRoleArn": {}, "IdempotencyToken": {}, @@ -2146,7 +2425,7 @@ "locationName": "resourceArn" }, "Tags": { - "shape": "Sc" + "shape": "Sf" } } }, @@ -2192,7 +2471,7 @@ "locationName": "backupPlanId" }, "BackupPlan": { - "shape": "S2" + "shape": "S6" } } }, @@ -2206,7 +2485,7 @@ }, "VersionId": {}, "AdvancedBackupSettings": { - "shape": "Sj" + "shape": "Sm" } } }, @@ -2229,7 +2508,7 @@ }, "FrameworkDescription": {}, "FrameworkControls": { - "shape": "S1b" + "shape": "S1d" }, "IdempotencyToken": { "idempotencyToken": true @@ -2257,7 +2536,7 @@ "type": "structure", "members": { "GlobalSettings": { - "shape": "S2m" + "shape": "S2y" } } } @@ -2282,7 +2561,7 @@ "locationName": "recoveryPointArn" }, "Lifecycle": { - "shape": "Sa" + "shape": "Se" } } }, @@ -2292,10 +2571,10 @@ "BackupVaultArn": {}, "RecoveryPointArn": {}, "Lifecycle": { - "shape": "Sa" + "shape": "Se" }, "CalculatedLifecycle": { - "shape": "S2u" + "shape": "S36" } } }, @@ -2310,10 +2589,10 @@ "type": "structure", "members": { "ResourceTypeOptInPreference": { - "shape": "S2z" + "shape": "S3a" }, "ResourceTypeManagementPreference": { - "shape": "S31" + "shape": "S3c" } } } @@ -2335,10 +2614,10 @@ }, "ReportPlanDescription": {}, "ReportDeliveryChannel": { - "shape": "S1q" + "shape": "S1z" }, "ReportSetting": { - "shape": "S1s" + "shape": "S21" }, "IdempotencyToken": { "idempotencyToken": true @@ -2359,7 +2638,7 @@ } }, "shapes": { - "S2": { + "S6": { "type": "structure", "required": [ "BackupPlanName", @@ -2386,13 +2665,13 @@ "type": "long" }, "Lifecycle": { - "shape": "Sa" + "shape": "Se" }, "RecoveryPointTags": { - "shape": "Sc" + "shape": "Sf" }, "CopyActions": { - "shape": "Sf" + "shape": "Si" }, "EnableContinuousBackup": { "type": "boolean" @@ -2401,11 +2680,11 @@ } }, "AdvancedBackupSettings": { - "shape": "Sj" + "shape": "Sm" } } }, - "Sa": { + "Se": { "type": "structure", "members": { "MoveToColdStorageAfterDays": { @@ -2416,13 +2695,13 @@ } } }, - "Sc": { + "Sf": { "type": "map", "key": {}, "value": {}, "sensitive": true }, - "Sf": 
{ + "Si": { "type": "list", "member": { "type": "structure", @@ -2431,30 +2710,30 @@ ], "members": { "Lifecycle": { - "shape": "Sa" + "shape": "Se" }, "DestinationBackupVaultArn": {} } } }, - "Sj": { + "Sm": { "type": "list", "member": { "type": "structure", "members": { "ResourceType": {}, "BackupOptions": { - "shape": "Sm" + "shape": "Sp" } } } }, - "Sm": { + "Sp": { "type": "map", "key": {}, "value": {} }, - "St": { + "Sv": { "type": "structure", "required": [ "SelectionName", @@ -2464,7 +2743,7 @@ "SelectionName": {}, "IamRoleArn": {}, "Resources": { - "shape": "Sw" + "shape": "Sy" }, "ListOfTags": { "type": "list", @@ -2483,32 +2762,32 @@ } }, "NotResources": { - "shape": "Sw" + "shape": "Sy" }, "Conditions": { "type": "structure", "members": { "StringEquals": { - "shape": "S13" + "shape": "S15" }, "StringNotEquals": { - "shape": "S13" + "shape": "S15" }, "StringLike": { - "shape": "S13" + "shape": "S15" }, "StringNotLike": { - "shape": "S13" + "shape": "S15" } } } } }, - "Sw": { + "Sy": { "type": "list", "member": {} }, - "S13": { + "S15": { "type": "list", "member": { "type": "structure", @@ -2518,7 +2797,7 @@ } } }, - "S1b": { + "S1d": { "type": "list", "member": { "type": "structure", @@ -2549,19 +2828,47 @@ "member": {} }, "Tags": { - "shape": "S1l" + "shape": "S1n" } } } } } }, - "S1l": { + "S1n": { "type": "map", "key": {}, "value": {} }, "S1q": { + "type": "structure", + "members": { + "VaultNames": { + "type": "list", + "member": {} + }, + "ResourceIdentifiers": { + "type": "list", + "member": {} + }, + "DateRange": { + "type": "structure", + "required": [ + "FromDate", + "ToDate" + ], + "members": { + "FromDate": { + "type": "timestamp" + }, + "ToDate": { + "type": "timestamp" + } + } + } + } + }, + "S1z": { "type": "structure", "required": [ "S3BucketName" @@ -2575,7 +2882,7 @@ } } }, - "S1s": { + "S21": { "type": "structure", "required": [ "ReportTemplate" @@ -2583,18 +2890,27 @@ "members": { "ReportTemplate": {}, "FrameworkArns": { - "shape": "S1t" + "shape": "S22" }, "NumberOfFrameworks": { "type": "integer" + }, + "Accounts": { + "shape": "S22" + }, + "OrganizationUnits": { + "shape": "S22" + }, + "Regions": { + "shape": "S22" } } }, - "S1t": { + "S22": { "type": "list", "member": {} }, - "S2a": { + "S2j": { "type": "structure", "members": { "BackupPlanId": {}, @@ -2603,7 +2919,7 @@ "BackupRuleId": {} } }, - "S2g": { + "S2r": { "type": "structure", "members": { "AccountId": {}, @@ -2626,17 +2942,32 @@ }, "IamRoleArn": {}, "CreatedBy": { - "shape": "S2a" + "shape": "S2j" + }, + "ResourceType": {}, + "ParentJobId": {}, + "IsParent": { + "type": "boolean" }, - "ResourceType": {} + "CompositeMemberIdentifier": {}, + "NumberOfChildJobs": { + "type": "long" + }, + "ChildJobsInState": { + "type": "map", + "key": {}, + "value": { + "type": "long" + } + } } }, - "S2m": { + "S2y": { "type": "map", "key": {}, "value": {} }, - "S2u": { + "S36": { "type": "structure", "members": { "MoveToColdStorageAt": { @@ -2647,21 +2978,21 @@ } } }, - "S2z": { + "S3a": { "type": "map", "key": {}, "value": { "type": "boolean" } }, - "S31": { + "S3c": { "type": "map", "key": {}, "value": { "type": "boolean" } }, - "S35": { + "S3g": { "type": "structure", "members": { "ReportJobId": {}, @@ -2680,23 +3011,23 @@ "members": { "S3BucketName": {}, "S3Keys": { - "shape": "S1t" + "shape": "S22" } } } } }, - "S39": { + "S3k": { "type": "structure", "members": { "ReportPlanArn": {}, "ReportPlanName": {}, "ReportPlanDescription": {}, "ReportSetting": { - "shape": "S1s" + "shape": "S21" }, 
"ReportDeliveryChannel": { - "shape": "S1q" + "shape": "S1z" }, "DeploymentStatus": {}, "CreationTime": { @@ -2710,7 +3041,7 @@ } } }, - "S3j": { + "S3v": { "type": "structure", "required": [ "BackupPlanName", @@ -2737,14 +3068,14 @@ "type": "long" }, "Lifecycle": { - "shape": "Sa" + "shape": "Se" }, "RecoveryPointTags": { - "shape": "Sc" + "shape": "Sf" }, "RuleId": {}, "CopyActions": { - "shape": "Sf" + "shape": "Si" }, "EnableContinuousBackup": { "type": "boolean" @@ -2753,21 +3084,21 @@ } }, "AdvancedBackupSettings": { - "shape": "Sj" + "shape": "Sm" } } }, - "S3x": { + "S49": { "type": "list", "member": {} }, - "S41": { + "S4f": { "type": "map", "key": {}, "value": {}, "sensitive": true }, - "S4i": { + "S4w": { "type": "structure", "members": { "BackupPlanArn": {}, @@ -2785,7 +3116,7 @@ "type": "timestamp" }, "AdvancedBackupSettings": { - "shape": "Sj" + "shape": "Sm" } } } diff --git a/apis/backup-2018-11-15.normal.json b/apis/backup-2018-11-15.normal.json index c750c110f3..877acc3e88 100644 --- a/apis/backup-2018-11-15.normal.json +++ b/apis/backup-2018-11-15.normal.json @@ -11,6 +11,39 @@ "uid": "backup-2018-11-15" }, "operations": { + "CancelLegalHold": { + "name": "CancelLegalHold", + "http": { + "method": "DELETE", + "requestUri": "/legal-holds/{legalHoldId}", + "responseCode": 201 + }, + "input": { + "shape": "CancelLegalHoldInput" + }, + "output": { + "shape": "CancelLegalHoldOutput" + }, + "errors": [ + { + "shape": "InvalidParameterValueException" + }, + { + "shape": "InvalidResourceStateException" + }, + { + "shape": "MissingParameterValueException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "
This action removes the specified legal hold on a recovery point. This action can only be performed by a user with sufficient permissions.
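For illustration only, a hedged sketch (not part of this diff or the generated model) of how the new CancelLegalHold operation might be invoked through the AWS SDK for JavaScript v2 client, assuming the SDK's usual camelCase mapping of operations onto client methods; the region, legal hold ID, and values below are placeholders.

```javascript
// Sketch: cancelling a legal hold with the AWS SDK for JavaScript v2.
// Parameter names follow CancelLegalHoldInput as added in this model.
const AWS = require('aws-sdk');
const backup = new AWS.Backup({ region: 'us-east-1' }); // placeholder region

backup.cancelLegalHold({
  LegalHoldId: 'a1b2c3d4-example-legal-hold-id', // required, placeholder
  CancelDescription: 'Litigation closed',        // required
  RetainRecordInDays: 30                         // optional: keep the hold record around after cancellation
}, (err, data) => {
  if (err) console.error('CancelLegalHold failed:', err);
  else console.log('Legal hold cancelled:', data);
});
```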
", + "idempotent": true + }, "CreateBackupPlan": { "name": "CreateBackupPlan", "http": { @@ -139,6 +172,35 @@ "documentation": "Creates a framework with one or more controls. A framework is a collection of controls that you can use to evaluate your backup practices. By using pre-built customizable controls to define your policies, you can evaluate whether your backup practices comply with your policies and which resources are not yet in compliance.
", "idempotent": true }, + "CreateLegalHold": { + "name": "CreateLegalHold", + "http": { + "method": "POST", + "requestUri": "/legal-holds/" + }, + "input": { + "shape": "CreateLegalHoldInput" + }, + "output": { + "shape": "CreateLegalHoldOutput" + }, + "errors": [ + { + "shape": "InvalidParameterValueException" + }, + { + "shape": "MissingParameterValueException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "LimitExceededException" + } + ], + "documentation": "This action creates a legal hold on a recovery point (backup). A legal hold is a restraint on altering or deleting a backup until an authorized user cancels the legal hold. Any actions to delete or disassociate a recovery point will fail with an error if one or more active legal holds are on the recovery point.
", + "idempotent": true + }, "CreateReportPlan": { "name": "CreateReportPlan", "http": { @@ -393,7 +455,7 @@ "shape": "InvalidRequestException" } ], - "documentation": "Deletes the recovery point specified by a recovery point ID.
If the recovery point ID belongs to a continuous backup, calling this endpoint deletes the existing continuous backup and stops future continuous backup.
", + "documentation": "Deletes the recovery point specified by a recovery point ID.
If the recovery point ID belongs to a continuous backup, calling this endpoint deletes the existing continuous backup and stops future continuous backup.
When an IAM role's permissions are insufficient to call this API, the service sends back an HTTP 200 response with an empty HTTP body, but the recovery point is not deleted. Instead, it enters an EXPIRED
state.
EXPIRED
recovery points can be deleted with this API once the IAM role has the iam:CreateServiceLinkedRole
action. To learn more about adding this role, see Troubleshooting manual deletions.
If the user or role is deleted or the permission within the role is removed, the deletion will not be successful and will enter an EXPIRED
state.
Deletes the specified continuous backup recovery point from Backup and releases control of that continuous backup to the source service, such as Amazon RDS. The source service will continue to create and retain continuous backups using the lifecycle that you specified in your original backup plan.
Does not support snapshot backup recovery points.
" }, + "DisassociateRecoveryPointFromParent": { + "name": "DisassociateRecoveryPointFromParent", + "http": { + "method": "DELETE", + "requestUri": "/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}/parentAssociation", + "responseCode": 204 + }, + "input": { + "shape": "DisassociateRecoveryPointFromParentInput" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "InvalidParameterValueException" + }, + { + "shape": "MissingParameterValueException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "InvalidRequestException" + } + ], + "documentation": "This action to a specific child (nested) recovery point removes the relationship between the specified recovery point and its parent (composite) recovery point.
" + }, "ExportBackupPlanTemplate": { "name": "ExportBackupPlanTemplate", "http": { @@ -961,6 +1052,35 @@ "documentation": "Returns event notifications for the specified backup vault.
", "idempotent": true }, + "GetLegalHold": { + "name": "GetLegalHold", + "http": { + "method": "GET", + "requestUri": "/legal-holds/{legalHoldId}/" + }, + "input": { + "shape": "GetLegalHoldInput" + }, + "output": { + "shape": "GetLegalHoldOutput" + }, + "errors": [ + { + "shape": "InvalidParameterValueException" + }, + { + "shape": "MissingParameterValueException" + }, + { + "shape": "ServiceUnavailableException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "This action returns details for a specified legal hold. The details are the body of a legal hold in JSON format, in addition to metadata.
", + "idempotent": true + }, "GetRecoveryPointRestoreMetadata": { "name": "GetRecoveryPointRestoreMetadata", "http": { @@ -1217,6 +1337,29 @@ ], "documentation": "Returns a list of all frameworks for an Amazon Web Services account and Amazon Web Services Region.
" }, + "ListLegalHolds": { + "name": "ListLegalHolds", + "http": { + "method": "GET", + "requestUri": "/legal-holds/" + }, + "input": { + "shape": "ListLegalHoldsInput" + }, + "output": { + "shape": "ListLegalHoldsOutput" + }, + "errors": [ + { + "shape": "InvalidParameterValueException" + }, + { + "shape": "ServiceUnavailableException" + } + ], + "documentation": "This action returns metadata about active and previous legal holds.
", + "idempotent": true + }, "ListProtectedResources": { "name": "ListProtectedResources", "http": { @@ -1269,6 +1412,32 @@ "documentation": "Returns detailed information about the recovery points stored in a backup vault.
", "idempotent": true }, + "ListRecoveryPointsByLegalHold": { + "name": "ListRecoveryPointsByLegalHold", + "http": { + "method": "GET", + "requestUri": "/legal-holds/{legalHoldId}/recovery-points" + }, + "input": { + "shape": "ListRecoveryPointsByLegalHoldInput" + }, + "output": { + "shape": "ListRecoveryPointsByLegalHoldOutput" + }, + "errors": [ + { + "shape": "InvalidParameterValueException" + }, + { + "shape": "MissingParameterValueException" + }, + { + "shape": "ServiceUnavailableException" + } + ], + "documentation": "This action returns recovery point ARNs (Amazon Resource Names) of the specified legal hold.
", + "idempotent": true + }, "ListRecoveryPointsByResource": { "name": "ListRecoveryPointsByResource", "http": { @@ -1316,6 +1485,9 @@ }, { "shape": "ServiceUnavailableException" + }, + { + "shape": "ResourceNotFoundException" } ], "documentation": "Returns details about your report jobs.
" @@ -1635,7 +1807,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Attempts to cancel a job to create a one-time backup of a resource.
" + "documentation": "Attempts to cancel a job to create a one-time backup of a resource.
This action is not supported for the following services: Amazon FSx for Windows File Server, Amazon FSx for Lustre, FSx for ONTAP , Amazon FSx for OpenZFS, Amazon DocumentDB (with MongoDB compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune.
" }, "TagResource": { "name": "TagResource", @@ -1981,10 +2153,27 @@ "BackupType": { "shape": "string", "documentation": "Represents the type of backup for a backup job.
" + }, + "ParentJobId": { + "shape": "string", + "documentation": "This uniquely identifies a request to Backup to back up a resource. The return will be the parent (composite) job ID.
" + }, + "IsParent": { + "shape": "boolean", + "documentation": "This is a boolean value indicating this is a parent (composite) backup job.
" } }, "documentation": "Contains detailed information about a backup job.
" }, + "BackupJobChildJobsInState": { + "type": "map", + "key": { + "shape": "BackupJobState" + }, + "value": { + "shape": "Long" + } + }, "BackupJobState": { "type": "string", "enum": [ @@ -1995,7 +2184,8 @@ "ABORTED", "COMPLETED", "FAILED", - "EXPIRED" + "EXPIRED", + "PARTIAL" ] }, "BackupJobsList": { @@ -2163,7 +2353,7 @@ }, "StartWindowMinutes": { "shape": "WindowMinutes", - "documentation": "A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional.
" + "documentation": "A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors.
" }, "CompletionWindowMinutes": { "shape": "WindowMinutes", @@ -2213,7 +2403,7 @@ }, "StartWindowMinutes": { "shape": "WindowMinutes", - "documentation": "A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional.
" + "documentation": "A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors.
" }, "CompletionWindowMinutes": { "shape": "WindowMinutes", @@ -2429,6 +2619,37 @@ }, "documentation": "Contains DeleteAt
and MoveToColdStorageAt
timestamps, which are used to specify a lifecycle for a recovery point.
The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define.
Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.
Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types.
" }, + "CancelLegalHoldInput": { + "type": "structure", + "required": [ + "LegalHoldId", + "CancelDescription" + ], + "members": { + "LegalHoldId": { + "shape": "string", + "documentation": "Legal hold ID required to remove the specified legal hold on a recovery point.
", + "location": "uri", + "locationName": "legalHoldId" + }, + "CancelDescription": { + "shape": "string", + "documentation": "String describing the reason for removing the legal hold.
", + "location": "querystring", + "locationName": "cancelDescription" + }, + "RetainRecordInDays": { + "shape": "Long", + "documentation": "The integer amount in days specifying amount of days after this API operation to remove legal hold.
", + "location": "querystring", + "locationName": "retainRecordInDays" + } + } + }, + "CancelLegalHoldOutput": { + "type": "structure", + "members": {} + }, "ComplianceResourceIdList": { "type": "list", "member": { @@ -2638,17 +2859,47 @@ "ResourceType": { "shape": "ResourceType", "documentation": "The type of Amazon Web Services resource to be copied; for example, an Amazon Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) database.
" + }, + "ParentJobId": { + "shape": "string", + "documentation": "This uniquely identifies a request to Backup to copy a resource. The return will be the parent (composite) job ID.
" + }, + "IsParent": { + "shape": "boolean", + "documentation": "This is a boolean value indicating this is a parent (composite) copy job.
" + }, + "CompositeMemberIdentifier": { + "shape": "string", + "documentation": "This is the identifier of a resource within a composite group, such as nested (child) recovery point belonging to a composite (parent) stack. The ID is transferred from the logical ID within a stack.
" + }, + "NumberOfChildJobs": { + "shape": "Long", + "documentation": "This is the number of child (nested) copy jobs.
" + }, + "ChildJobsInState": { + "shape": "CopyJobChildJobsInState", + "documentation": "This returns the statistics of the included child (nested) copy jobs.
" } }, "documentation": "Contains detailed information about a copy job.
" }, + "CopyJobChildJobsInState": { + "type": "map", + "key": { + "shape": "CopyJobState" + }, + "value": { + "shape": "Long" + } + }, "CopyJobState": { "type": "string", "enum": [ "CREATED", "RUNNING", "COMPLETED", - "FAILED" + "FAILED", + "PARTIAL" ] }, "CopyJobsList": { @@ -2828,6 +3079,68 @@ } } }, + "CreateLegalHoldInput": { + "type": "structure", + "required": [ + "Title", + "Description" + ], + "members": { + "Title": { + "shape": "string", + "documentation": "This is the string title of the legal hold.
" + }, + "Description": { + "shape": "string", + "documentation": "This is the string description of the legal hold.
" + }, + "IdempotencyToken": { + "shape": "string", + "documentation": "This is a user-chosen string used to distinguish between otherwise identical calls. Retrying a successful request with the same idempotency token results in a success message with no action taken.
" + }, + "RecoveryPointSelection": { + "shape": "RecoveryPointSelection", + "documentation": "This specifies criteria to assign a set of resources, such as resource types or backup vaults.
" + }, + "Tags": { + "shape": "Tags", + "documentation": "Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : /.
" + } + } + }, + "CreateLegalHoldOutput": { + "type": "structure", + "members": { + "Title": { + "shape": "string", + "documentation": "This is the string title of the legal hold returned after creating the legal hold.
" + }, + "Status": { + "shape": "LegalHoldStatus", + "documentation": "This displays the status of the legal hold returned after creating the legal hold. Statuses can be ACTIVE
, PENDING
, CANCELED
, CANCELING
, or FAILED
.
This is the returned string description of the legal hold.
" + }, + "LegalHoldId": { + "shape": "string", + "documentation": "Legal hold ID returned for the specified legal hold on a recovery point.
" + }, + "LegalHoldArn": { + "shape": "ARN", + "documentation": "This is the ARN (Amazon Resource Number) of the created legal hold.
" + }, + "CreationDate": { + "shape": "timestamp", + "documentation": "Time in number format when legal hold was created.
" + }, + "RecoveryPointSelection": { + "shape": "RecoveryPointSelection", + "documentation": "This specifies criteria to assign a set of resources, such as resource types or backup vaults.
" + } + } + }, "CreateReportPlanInput": { "type": "structure", "required": [ @@ -2883,6 +3196,24 @@ "CronExpression": { "type": "string" }, + "DateRange": { + "type": "structure", + "required": [ + "FromDate", + "ToDate" + ], + "members": { + "FromDate": { + "shape": "timestamp", + "documentation": "This value is the beginning date, inclusive.
The date and time are in Unix format and Coordinated Universal Time (UTC), and it is accurate to milliseconds (milliseconds are optional).
" + }, + "ToDate": { + "shape": "timestamp", + "documentation": "This value is the end date, inclusive.
The date and time are in Unix format and Coordinated Universal Time (UTC), and it is accurate to milliseconds (milliseconds are optional).
" + } + }, + "documentation": "This is a resource filter containing FromDate: DateTime and ToDate: DateTime. Both values are required. Future DateTime values are not permitted.
The date and time are in Unix format and Coordinated Universal Time (UTC), and it is accurate to milliseconds ((milliseconds are optional). For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
" + }, "DeleteBackupPlanInput": { "type": "structure", "required": [ @@ -3140,6 +3471,22 @@ "BackupType": { "shape": "string", "documentation": "Represents the actual backup type selected for a backup job. For example, if a successful Windows Volume Shadow Copy Service (VSS) backup was taken, BackupType
returns \"WindowsVSS\"
. If BackupType
is empty, then the backup type was a regular backup.
This returns the parent (composite) resource backup job ID.
" + }, + "IsParent": { + "shape": "boolean", + "documentation": "This returns the boolean value that a backup job is a parent (composite) job.
" + }, + "NumberOfChildJobs": { + "shape": "Long", + "documentation": "This returns the number of child (nested) backup jobs.
" + }, + "ChildJobsInState": { + "shape": "BackupJobChildJobsInState", + "documentation": "This returns the statistics of the included child (nested) backup jobs.
" } } }, @@ -3260,7 +3607,7 @@ }, "CreationTime": { "shape": "timestamp", - "documentation": "The date and time that a framework is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationTime
is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
The date and time that a framework is created, in ISO 8601 representation. The value of CreationTime
is accurate to milliseconds. For example, 2020-07-10T15:00:00.000-08:00 represents the 10th of July 2020 at 3:00 PM 8 hours behind UTC.
A status code specifying the state of the recovery point.
PARTIAL
status indicates Backup could not create the recovery point before the backup window closed. To increase your backup plan window using the API, see UpdateBackupPlan. You can also increase your backup plan window using the Console by choosing and editing your backup plan.
EXPIRED
status indicates that the recovery point has exceeded its retention period, but Backup lacks permission or is otherwise unable to delete it. To manually delete these recovery points, see Step 3: Delete the recovery points in the Clean up resources section of Getting started.
A status code specifying the state of the recovery point.
PARTIAL
status indicates Backup could not create the recovery point before the backup window closed. To increase your backup plan window using the API, see UpdateBackupPlan. You can also increase your backup plan window using the Console by choosing and editing your backup plan.
EXPIRED
status indicates that the recovery point has exceeded its retention period, but Backup lacks permission or is otherwise unable to delete it. To manually delete these recovery points, see Step 3: Delete the recovery points in the Clean up resources section of Getting started.
STOPPED
status occurs on a continuous backup where a user has taken some action that causes the continuous backup to be disabled. This can be caused by the removal of permissions, turning off versioning, turning off events being sent to EventBridge, or disabling the EventBridge rules that are put in place by Backup.
To resolve STOPPED
status, ensure that all requested permissions are in place and that versioning is enabled on the S3 bucket. Once these conditions are met, the next instance of a backup rule running will result in a new continuous recovery point being created. The recovery points with STOPPED status do not need to be deleted.
The date and time that a recovery point was last restored, in Unix format and Coordinated Universal Time (UTC). The value of LastRestoreTime
is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
This is an ARN that uniquely identifies a parent (composite) recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45
.
This is the identifier of a resource within a composite group, such as nested (child) recovery point belonging to a composite (parent) stack. The ID is transferred from the logical ID within a stack.
" + }, + "IsParent": { + "shape": "boolean", + "documentation": "This returns the boolean value that a recovery point is a parent (composite) job.
" } } }, @@ -3560,6 +3919,27 @@ } } }, + "DisassociateRecoveryPointFromParentInput": { + "type": "structure", + "required": [ + "BackupVaultName", + "RecoveryPointArn" + ], + "members": { + "BackupVaultName": { + "shape": "BackupVaultName", + "documentation": "This is the name of a logical container where the child (nested) recovery point is stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens.
", + "location": "uri", + "locationName": "backupVaultName" + }, + "RecoveryPointArn": { + "shape": "ARN", + "documentation": "This is the Amazon Resource Name (ARN) that uniquely identifies the child (nested) recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
The date and time that a framework is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationTime
is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
The date and time that a framework is created, in ISO 8601 representation. The value of CreationTime
is accurate to milliseconds. For example, 2020-07-10T15:00:00.000-08:00 represents the 10th of July 2020 at 3:00 PM 8 hours behind UTC.
This is the ID required to use GetLegalHold
. This unique ID is associated with a specific legal hold.
This is the string title of the legal hold.
" + }, + "Status": { + "shape": "LegalHoldStatus", + "documentation": "This is the status of the legal hold. Statuses can be ACTIVE
, CREATING
, CANCELED
, and CANCELING
.
This is the returned string description of the legal hold.
" + }, + "CancelDescription": { + "shape": "string", + "documentation": "String describing the reason for removing the legal hold.
" + }, + "LegalHoldId": { + "shape": "string", + "documentation": "This is the returned ID associated with a specified legal hold.
" + }, + "LegalHoldArn": { + "shape": "ARN", + "documentation": "This is the returned framework ARN for the specified legal hold. An Amazon Resource Name (ARN) uniquely identifies a resource. The format of the ARN depends on the resource type.
" + }, + "CreationDate": { + "shape": "timestamp", + "documentation": "Time in number format when legal hold was created.
" + }, + "CancellationDate": { + "shape": "timestamp", + "documentation": "Time in number when legal hold was cancelled.
" + }, + "RetainRecordUntil": { + "shape": "timestamp", + "documentation": "This is the date and time until which the legal hold record will be retained.
" + }, + "RecoveryPointSelection": { + "shape": "RecoveryPointSelection", + "documentation": "This specifies criteria to assign a set of resources, such as resource types or backup vaults.
" + } + } + }, "GetRecoveryPointRestoreMetadataInput": { "type": "structure", "required": [ @@ -3973,6 +4412,55 @@ "IsEnabled": { "type": "boolean" }, + "LegalHold": { + "type": "structure", + "members": { + "Title": { + "shape": "string", + "documentation": "This is the title of a legal hold.
" + }, + "Status": { + "shape": "LegalHoldStatus", + "documentation": "This is the status of the legal hold. Statuses can be ACTIVE
, CREATING
, CANCELED
, and CANCELING
.
This is the description of a legal hold.
" + }, + "LegalHoldId": { + "shape": "string", + "documentation": "ID of specific legal hold on one or more recovery points.
" + }, + "LegalHoldArn": { + "shape": "ARN", + "documentation": "This is an Amazon Resource Number (ARN) that uniquely identifies the legal hold; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45
.
This is the time in number format when legal hold was created.
" + }, + "CancellationDate": { + "shape": "timestamp", + "documentation": "This is the time in number format when legal hold was cancelled.
" + } + }, + "documentation": "A legal hold is an administrative tool that helps prevent backups from being deleted while under a hold. While the hold is in place, backups under a hold cannot be deleted and lifecycle policies that would alter the backup status (such as transition to cold storage) are delayed until the legal hold is removed. A backup can have more than one legal hold. Legal holds are applied to one or more backups (also known as recovery points). These backups can be filtered by resource types and by resource IDs.
" + }, + "LegalHoldStatus": { + "type": "string", + "enum": [ + "CREATING", + "ACTIVE", + "CANCELING", + "CANCELED" + ] + }, + "LegalHoldsList": { + "type": "list", + "member": { + "shape": "LegalHold" + } + }, "Lifecycle": { "type": "structure", "members": { @@ -4055,6 +4543,12 @@ "documentation": "Returns only backup jobs completed before a date expressed in Unix format and Coordinated Universal Time (UTC).
", "location": "querystring", "locationName": "completeBefore" + }, + "ByParentJobId": { + "shape": "string", + "documentation": "This is a filter to list child (nested) jobs based on parent job ID.
", + "location": "querystring", + "locationName": "parentJobId" } } }, @@ -4313,6 +4807,12 @@ "documentation": "Returns only copy jobs completed after a date expressed in Unix format and Coordinated Universal Time (UTC).
", "location": "querystring", "locationName": "completeAfter" + }, + "ByParentJobId": { + "shape": "string", + "documentation": "This is a filter to list child (nested) jobs based on parent job ID.
", + "location": "querystring", + "locationName": "parentJobId" } } }, @@ -4359,6 +4859,36 @@ } } }, + "ListLegalHoldsInput": { + "type": "structure", + "members": { + "NextToken": { + "shape": "string", + "documentation": "The next item following a partial list of returned resources. For example, if a request is made to return maxResults
number of resources, NextToken
allows you to return more items in your list starting at the location pointed to by the next token.
The maximum number of resource list items to be returned.
", + "location": "querystring", + "locationName": "maxResults" + } + } + }, + "ListLegalHoldsOutput": { + "type": "structure", + "members": { + "NextToken": { + "shape": "string", + "documentation": "The next item following a partial list of returned resources. For example, if a request is made to return maxResults
number of resources, NextToken
allows you to return more items in your list starting at the location pointed to by the next token.
This is an array of returned legal holds, both active and previous.
" + } + } + }, "ListOfTags": { "type": "list", "member": { @@ -4448,6 +4978,12 @@ "documentation": "Returns only recovery points that were created after the specified timestamp.
", "location": "querystring", "locationName": "createdAfter" + }, + "ByParentRecoveryPointArn": { + "shape": "ARN", + "documentation": "This returns only recovery points that match the specified parent (composite) recovery point Amazon Resource Name (ARN).
", + "location": "querystring", + "locationName": "parentRecoveryPointArn" } } }, @@ -4464,6 +5000,45 @@ } } }, + "ListRecoveryPointsByLegalHoldInput": { + "type": "structure", + "required": [ + "LegalHoldId" + ], + "members": { + "LegalHoldId": { + "shape": "string", + "documentation": "This is the ID of the legal hold.
", + "location": "uri", + "locationName": "legalHoldId" + }, + "NextToken": { + "shape": "string", + "documentation": "This is the next item following a partial list of returned resources. For example, if a request is made to return maxResults
number of resources, NextToken
allows you to return more items in your list starting at the location pointed to by the next token.
This is the maximum number of resource list items to be returned.
", + "location": "querystring", + "locationName": "maxResults" + } + } + }, + "ListRecoveryPointsByLegalHoldOutput": { + "type": "structure", + "members": { + "RecoveryPoints": { + "shape": "RecoveryPointsList", + "documentation": "This is a list of the recovery points returned by ListRecoveryPointsByLegalHold
.
This is the next item following a partial list of returned resources.
" + } + } + }, "ListRecoveryPointsByResourceInput": { "type": "structure", "required": [ @@ -4815,7 +5390,7 @@ }, "BackupVaultEvents": { "shape": "BackupVaultEvents", - "documentation": "An array of events that indicate the status of jobs to back up resources to the backup vault.
For common use cases and code samples, see Using Amazon SNS to track Backup events.
The following events are supported:
BACKUP_JOB_STARTED | BACKUP_JOB_COMPLETED
COPY_JOB_STARTED | COPY_JOB_SUCCESSFUL | COPY_JOB_FAILED
RESTORE_JOB_STARTED | RESTORE_JOB_COMPLETED | RECOVERY_POINT_MODIFIED
S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED
Ignore the list below because it includes deprecated events. Refer to the list above.
An array of events that indicate the status of jobs to back up resources to the backup vault.
For common use cases and code samples, see Using Amazon SNS to track Backup events.
The following events are supported:
BACKUP_JOB_STARTED | BACKUP_JOB_COMPLETED
COPY_JOB_STARTED | COPY_JOB_SUCCESSFUL | COPY_JOB_FAILED
RESTORE_JOB_STARTED | RESTORE_JOB_COMPLETED | RECOVERY_POINT_MODIFIED
S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED
The list below shows items that are deprecated events (for reference) and are no longer in use. They are no longer supported and will not return statuses or notifications. Refer to the list above for current supported events.
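As an illustrative aside, a hedged sketch of wiring a vault to the currently supported events listed above using the existing PutBackupVaultNotifications operation; the vault name and SNS topic ARN are placeholders.

```javascript
// Sketch only: subscribe a vault to the supported (non-deprecated) events above.
// The vault name and SNS topic ARN are placeholders.
const AWS = require('aws-sdk');
const backup = new AWS.Backup();

backup.putBackupVaultNotifications({
  BackupVaultName: 'my-backup-vault',
  SNSTopicArn: 'arn:aws:sns:us-east-1:123456789012:backup-events',
  BackupVaultEvents: [
    'BACKUP_JOB_STARTED', 'BACKUP_JOB_COMPLETED',
    'COPY_JOB_STARTED', 'COPY_JOB_SUCCESSFUL', 'COPY_JOB_FAILED',
    'RESTORE_JOB_STARTED', 'RESTORE_JOB_COMPLETED', 'RECOVERY_POINT_MODIFIED',
    'S3_BACKUP_OBJECT_FAILED', 'S3_RESTORE_OBJECT_FAILED'
  ]
}, (err) => {
  if (err) console.error(err);
});
```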
The date and time a recovery point was last restored, in Unix format and Coordinated Universal Time (UTC). The value of LastRestoreTime
is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
This is the Amazon Resource Name (ARN) of the parent (composite) recovery point.
" + }, + "CompositeMemberIdentifier": { + "shape": "string", + "documentation": "This is the identifier of a resource within a composite group, such as nested (child) recovery point belonging to a composite (parent) stack. The ID is transferred from the logical ID within a stack.
" + }, + "IsParent": { + "shape": "boolean", + "documentation": "This is a boolean value indicating this is a parent (composite) recovery point.
" } }, "documentation": "Contains detailed information about the recovery points stored in a backup vault.
" @@ -4933,6 +5520,14 @@ "BackupVaultName": { "shape": "BackupVaultName", "documentation": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens.
" + }, + "IsParent": { + "shape": "boolean", + "documentation": "This is a boolean value indicating this is a parent (composite) recovery point.
" + }, + "ParentRecoveryPointArn": { + "shape": "ARN", + "documentation": "This is the Amazon Resource Name (ARN) of the parent (composite) recovery point.
" } }, "documentation": "Contains detailed information about a saved recovery point.
" @@ -4965,6 +5560,33 @@ }, "documentation": "Contains information about the backup plan and rule that Backup used to initiate the recovery point backup.
" }, + "RecoveryPointMember": { + "type": "structure", + "members": { + "RecoveryPointArn": { + "shape": "ARN", + "documentation": "This is the Amazon Resource Name (ARN) of the parent (composite) recovery point.
" + } + }, + "documentation": "This is a recovery point which is a child (nested) recovery point of a parent (composite) recovery point. These recovery points can be disassociated from their parent (composite) recovery point, in which case they will no longer be a member.
" + }, + "RecoveryPointSelection": { + "type": "structure", + "members": { + "VaultNames": { + "shape": "VaultNames", + "documentation": "These are the names of the vaults in which the selected recovery points are contained.
" + }, + "ResourceIdentifiers": { + "shape": "ResourceIdentifiers", + "documentation": "These are the resources included in the resource selection (including type of resources and vaults).
" + }, + "DateRange": { + "shape": "DateRange" + } + }, + "documentation": "This specifies criteria to assign a set of resources, such as resource types or backup vaults.
" + }, "RecoveryPointStatus": { "type": "string", "enum": [ @@ -4974,6 +5596,12 @@ "EXPIRED" ] }, + "RecoveryPointsList": { + "type": "list", + "member": { + "shape": "RecoveryPointMember" + } + }, "ReportDeliveryChannel": { "type": "structure", "required": [ @@ -5133,6 +5761,18 @@ "NumberOfFrameworks": { "shape": "integer", "documentation": "The number of frameworks a report covers.
" + }, + "Accounts": { + "shape": "stringList", + "documentation": "These are the accounts to be included in the report.
" + }, + "OrganizationUnits": { + "shape": "stringList", + "documentation": "These are the Organizational Units to be included in the report.
" + }, + "Regions": { + "shape": "stringList", + "documentation": "These are the Regions to be included in the report.
" } }, "documentation": "Contains detailed information about a report setting.
" @@ -5143,6 +5783,12 @@ "shape": "ARN" } }, + "ResourceIdentifiers": { + "type": "list", + "member": { + "shape": "string" + } + }, "ResourceType": { "type": "string", "pattern": "^[a-zA-Z0-9\\-\\_\\.]{1,50}$" @@ -5280,7 +5926,7 @@ }, "StartWindowMinutes": { "shape": "WindowMinutes", - "documentation": "A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours.
" + "documentation": "A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours. If this value is included, it must be at least 60 minutes to avoid errors.
" }, "CompleteWindowMinutes": { "shape": "WindowMinutes", @@ -5314,6 +5960,10 @@ "CreationDate": { "shape": "timestamp", "documentation": "The date and time that a backup job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate
is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
This is a returned boolean value indicating this is a parent (composite) backup job.
" } } }, @@ -5361,6 +6011,10 @@ "CreationDate": { "shape": "timestamp", "documentation": "The date and time that a copy job is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate
is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
This is a returned boolean value indicating this is a parent (composite) copy job.
" } } }, @@ -5409,7 +6063,7 @@ }, "IamRoleArn": { "shape": "IAMRoleArn", - "documentation": "The Amazon Resource Name (ARN) of the IAM role that Backup uses to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access
.
The Amazon Resource Name (ARN) of the IAM role that Backup uses to create the target resource; for example: arn:aws:iam::123456789012:role/S3Access
.
The date and time that a framework is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationTime
is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.
The date and time that a framework is created, in ISO 8601 representation. The value of CreationTime
is accurate to milliseconds. For example, 2020-07-10T15:00:00.000-08:00 represents the 10th of July 2020 at 3:00 PM 8 hours behind UTC.
Causes the data replication initiation sequence to begin immediately upon next Handshake for the specified Source Server ID, regardless of when the previous initiation started. This command will work only if the Source Server is stalled or is in a DISCONNECTED or STOPPED state.
" }, + "ReverseReplication": { + "name": "ReverseReplication", + "http": { + "method": "POST", + "requestUri": "/ReverseReplication", + "responseCode": 200 + }, + "input": { + "shape": "ReverseReplicationRequest" + }, + "output": { + "shape": "ReverseReplicationResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "UninitializedAccountException" + } + ], + "documentation": "Start replication to origin / target region - applies only to protected instances that originated in EC2. For recovery instances on target region - starts replication back to origin region. For failback instances on origin region - starts replication to target region to re-protect them.
" + }, "StartFailbackLaunch": { "name": "StartFailbackLaunch", "http": { @@ -772,6 +810,38 @@ ], "documentation": "Launches Recovery Instances for the specified Source Servers. For each Source Server you may choose a point in time snapshot to launch from, or use an on demand snapshot.
" }, + "StartReplication": { + "name": "StartReplication", + "http": { + "method": "POST", + "requestUri": "/StartReplication", + "responseCode": 200 + }, + "input": { + "shape": "StartReplicationRequest" + }, + "output": { + "shape": "StartReplicationResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "UninitializedAccountException" + } + ], + "documentation": "Starts replication for a stopped Source Server. This action would make the Source Server protected again and restart billing for it.
" + }, "StopFailback": { "name": "StopFailback", "http": { @@ -798,6 +868,38 @@ ], "documentation": "Stops the failback process for a specified Recovery Instance. This changes the Failback State of the Recovery Instance back to FAILBACK_NOT_STARTED.
" }, + "StopReplication": { + "name": "StopReplication", + "http": { + "method": "POST", + "requestUri": "/StopReplication", + "responseCode": 200 + }, + "input": { + "shape": "StopReplicationRequest" + }, + "output": { + "shape": "StopReplicationResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "UninitializedAccountException" + } + ], + "documentation": "Stops replication for a Source Server. This action would make the Source Server unprotected, delete its existing snapshots and stop billing for it.
" + }, "TagResource": { "name": "TagResource", "http": { @@ -1069,6 +1171,18 @@ "max": 50, "min": 0 }, + "AwsAvailabilityZone": { + "type": "string", + "max": 255, + "min": 0, + "pattern": "^(us(-gov)?|ap|ca|cn|eu|sa|af|me)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-[0-9][a-z]$" + }, + "AwsRegion": { + "type": "string", + "max": 255, + "min": 0, + "pattern": "^(us(-gov)?|ap|ca|cn|eu|sa|af|me)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-[0-9]$" + }, "Boolean": { "type": "boolean", "box": true @@ -1840,6 +1954,13 @@ "NOT_EXTENDED" ] }, + "FailbackLaunchType": { + "type": "string", + "enum": [ + "RECOVERY", + "DRILL" + ] + }, "FailbackReplicationError": { "type": "string", "enum": [ @@ -1851,7 +1972,19 @@ "FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT", "FAILED_TO_CONFIGURE_REPLICATION_SOFTWARE", "FAILED_TO_PAIR_AGENT_WITH_REPLICATION_SOFTWARE", - "FAILED_TO_ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION" + "FAILED_TO_ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION", + "FAILED_GETTING_REPLICATION_STATE", + "SNAPSHOTS_FAILURE", + "FAILED_TO_CREATE_SECURITY_GROUP", + "FAILED_TO_LAUNCH_REPLICATION_SERVER", + "FAILED_TO_BOOT_REPLICATION_SERVER", + "FAILED_TO_AUTHENTICATE_WITH_SERVICE", + "FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE", + "FAILED_TO_CREATE_STAGING_DISKS", + "FAILED_TO_ATTACH_STAGING_DISKS", + "FAILED_TO_PAIR_REPLICATION_SERVER_WITH_AGENT", + "FAILED_TO_CONNECT_AGENT_TO_REPLICATION_SERVER", + "FAILED_TO_START_DATA_TRANSFER" ] }, "FailbackState": { @@ -1861,7 +1994,9 @@ "FAILBACK_IN_PROGRESS", "FAILBACK_READY_FOR_LAUNCH", "FAILBACK_COMPLETED", - "FAILBACK_ERROR" + "FAILBACK_ERROR", + "FAILBACK_NOT_READY_FOR_LAUNCH", + "FAILBACK_LAUNCH_STATE_NOT_AVAILABLE" ] }, "GetFailbackReplicationConfigurationRequest": { @@ -2399,6 +2534,13 @@ }, "documentation": "Operating System.
" }, + "OriginEnvironment": { + "type": "string", + "enum": [ + "ON_PREMISES", + "AWS" + ] + }, "PITPolicy": { "type": "list", "member": { @@ -2510,6 +2652,10 @@ "shape": "JobID", "documentation": "The ID of the Job that created the Recovery Instance.
" }, + "originEnvironment": { + "shape": "OriginEnvironment", + "documentation": "Environment (On Premises / AWS) of the instance that the recovery instance originated from.
" + }, "pointInTimeSnapshotDateTime": { "shape": "ISO8601DatetimeString", "documentation": "The date and time of the Point in Time (PIT) snapshot that this Recovery Instance was launched from.
" @@ -2648,7 +2794,18 @@ "DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT", "CONFIGURE_REPLICATION_SOFTWARE", "PAIR_AGENT_WITH_REPLICATION_SOFTWARE", - "ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION" + "ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION", + "WAIT", + "CREATE_SECURITY_GROUP", + "LAUNCH_REPLICATION_SERVER", + "BOOT_REPLICATION_SERVER", + "AUTHENTICATE_WITH_SERVICE", + "DOWNLOAD_REPLICATION_SOFTWARE", + "CREATE_STAGING_DISKS", + "ATTACH_STAGING_DISKS", + "PAIR_REPLICATION_SERVER_WITH_AGENT", + "CONNECT_AGENT_TO_REPLICATION_SERVER", + "START_DATA_TRANSFER" ] }, "RecoveryInstanceDataReplicationInitiationStepStatus": { @@ -2679,7 +2836,9 @@ "PAUSED", "RESCAN", "STALLED", - "DISCONNECTED" + "DISCONNECTED", + "REPLICATION_STATE_NOT_AVAILABLE", + "NOT_STARTED" ] }, "RecoveryInstanceDisk": { @@ -2735,6 +2894,10 @@ "shape": "JobID", "documentation": "The Job ID of the last failback log for this Recovery Instance.
" }, + "failbackLaunchType": { + "shape": "FailbackLaunchType", + "documentation": "The launch type (Recovery / Drill) of the last launch for the failback replication of this recovery instance.
" + }, "failbackToOriginalServer": { "shape": "Boolean", "documentation": "Whether we are failing back to the original Source Server for this Recovery Instance.
" @@ -2965,7 +3128,7 @@ }, "optimizedStagingDiskType": { "shape": "ReplicationConfigurationReplicatedDiskStagingDiskType", - "documentation": "The Staging Disk EBS volume type to be used during replication when stagingDiskType
is set to Auto. This is a read-only field.
When stagingDiskType
is set to Auto, this field shows the current staging disk EBS volume type as it is constantly updated by the service. This is a read-only field.
Replication direction designates whether this is a failover or a failback replication. When a DRS agent is installed on an instance, the replication direction is failover. If a recovery launch was made in the recovery location and a new recovery instance was created, and a failback replication was then initiated from that recovery instance back to the origin location, the replication direction is failback.
", + "enum": [ + "FAILOVER", + "FAILBACK" + ] + }, "ReplicationServersSecurityGroupsIDs": { "type": "list", "member": { @@ -3110,6 +3281,27 @@ } } }, + "ReverseReplicationRequest": { + "type": "structure", + "required": [ + "recoveryInstanceID" + ], + "members": { + "recoveryInstanceID": { + "shape": "RecoveryInstanceID", + "documentation": "The ID of the Recovery Instance that we want to reverse the replication for.
" + } + } + }, + "ReverseReplicationResponse": { + "type": "structure", + "members": { + "reversedDirectionSourceServerArn": { + "shape": "SourceServerARN", + "documentation": "ARN of created SourceServer.
" + } + } + }, "SecurityGroupID": { "type": "string", "max": 255, @@ -3121,6 +3313,24 @@ "max": 128, "min": 0 }, + "SourceCloudProperties": { + "type": "structure", + "members": { + "originAccountID": { + "shape": "AccountID", + "documentation": "AWS Account ID for an EC2-originated Source Server.
" + }, + "originAvailabilityZone": { + "shape": "AwsAvailabilityZone", + "documentation": "AWS Availability Zone for an EC2-originated Source Server.
" + }, + "originRegion": { + "shape": "AwsRegion", + "documentation": "AWS Region for an EC2-originated Source Server.
" + } + }, + "documentation": "Properties of the cloud environment where this Source Server originated from.
" + }, "SourceProperties": { "type": "structure", "members": { @@ -3182,6 +3392,18 @@ "shape": "RecoveryInstanceID", "documentation": "The ID of the Recovery Instance associated with this Source Server.
" }, + "replicationDirection": { + "shape": "ReplicationDirection", + "documentation": "Replication direction of the Source Server.
" + }, + "reversedDirectionSourceServerArn": { + "shape": "SourceServerARN", + "documentation": "For EC2-originated Source Servers which have been failed over and then failed back, this value will mean the ARN of the Source Server on the opposite replication direction.
" + }, + "sourceCloudProperties": { + "shape": "SourceCloudProperties", + "documentation": "Source cloud properties of the Source Server.
" + }, "sourceProperties": { "shape": "SourceProperties", "documentation": "The source properties of the Source Server.
" @@ -3357,6 +3579,27 @@ } } }, + "StartReplicationRequest": { + "type": "structure", + "required": [ + "sourceServerID" + ], + "members": { + "sourceServerID": { + "shape": "SourceServerID", + "documentation": "The ID of the Source Server to start replication for.
" + } + } + }, + "StartReplicationResponse": { + "type": "structure", + "members": { + "sourceServer": { + "shape": "SourceServer", + "documentation": "The Source Server that this action was targeted on.
" + } + } + }, "StopFailbackRequest": { "type": "structure", "required": [ @@ -3369,6 +3612,27 @@ } } }, + "StopReplicationRequest": { + "type": "structure", + "required": [ + "sourceServerID" + ], + "members": { + "sourceServerID": { + "shape": "SourceServerID", + "documentation": "The ID of the Source Server to stop replication for.
" + } + } + }, + "StopReplicationResponse": { + "type": "structure", + "members": { + "sourceServer": { + "shape": "SourceServer", + "documentation": "The Source Server that this action was targeted on.
" + } + } + }, "StrictlyPositiveInteger": { "type": "integer", "min": 1 diff --git a/apis/ecs-2014-11-13.min.json b/apis/ecs-2014-11-13.min.json index e4b8ebd8d4..2d50237a11 100644 --- a/apis/ecs-2014-11-13.min.json +++ b/apis/ecs-2014-11-13.min.json @@ -58,6 +58,9 @@ }, "defaultCapacityProviderStrategy": { "shape": "Ss" + }, + "serviceConnectDefaults": { + "shape": "Sw" } } }, @@ -65,7 +68,7 @@ "type": "structure", "members": { "cluster": { - "shape": "Sx" + "shape": "Sy" } } } @@ -81,10 +84,10 @@ "serviceName": {}, "taskDefinition": {}, "loadBalancers": { - "shape": "S15" + "shape": "S17" }, "serviceRegistries": { - "shape": "S18" + "shape": "S1a" }, "desiredCount": { "type": "integer" @@ -97,23 +100,23 @@ "platformVersion": {}, "role": {}, "deploymentConfiguration": { - "shape": "S1b" + "shape": "S1d" }, "placementConstraints": { - "shape": "S1d" + "shape": "S1f" }, "placementStrategy": { - "shape": "S1g" + "shape": "S1i" }, "networkConfiguration": { - "shape": "S1j" + "shape": "S1l" }, "healthCheckGracePeriodSeconds": { "type": "integer" }, "schedulingStrategy": {}, "deploymentController": { - "shape": "S1n" + "shape": "S1p" }, "tags": { "shape": "Sa" @@ -124,6 +127,9 @@ "propagateTags": {}, "enableExecuteCommand": { "type": "boolean" + }, + "serviceConnectConfiguration": { + "shape": "S1s" } } }, @@ -131,7 +137,7 @@ "type": "structure", "members": { "service": { - "shape": "S1r" + "shape": "S24" } } } @@ -150,13 +156,13 @@ "externalId": {}, "taskDefinition": {}, "networkConfiguration": { - "shape": "S1j" + "shape": "S1l" }, "loadBalancers": { - "shape": "S15" + "shape": "S17" }, "serviceRegistries": { - "shape": "S18" + "shape": "S1a" }, "launchType": {}, "capacityProviderStrategy": { @@ -164,7 +170,7 @@ }, "platformVersion": {}, "scale": { - "shape": "S1v" + "shape": "S28" }, "clientToken": {}, "tags": { @@ -176,7 +182,7 @@ "type": "structure", "members": { "taskSet": { - "shape": "S1t" + "shape": "S26" } } } @@ -196,7 +202,7 @@ "type": "structure", "members": { "setting": { - "shape": "S29" + "shape": "S2o" } } } @@ -210,7 +216,7 @@ "members": { "cluster": {}, "attributes": { - "shape": "S2b" + "shape": "S2q" } } }, @@ -218,7 +224,7 @@ "type": "structure", "members": { "attributes": { - "shape": "S2b" + "shape": "S2q" } } } @@ -256,7 +262,7 @@ "type": "structure", "members": { "cluster": { - "shape": "Sx" + "shape": "Sy" } } } @@ -279,7 +285,7 @@ "type": "structure", "members": { "service": { - "shape": "S1r" + "shape": "S24" } } } @@ -305,7 +311,7 @@ "type": "structure", "members": { "taskSet": { - "shape": "S1t" + "shape": "S26" } } } @@ -328,7 +334,7 @@ "type": "structure", "members": { "containerInstance": { - "shape": "S2q" + "shape": "S35" } } } @@ -347,7 +353,7 @@ "type": "structure", "members": { "taskDefinition": { - "shape": "S33" + "shape": "S3i" } } } @@ -379,7 +385,7 @@ } }, "failures": { - "shape": "S5d" + "shape": "S5o" }, "nextToken": {} } @@ -404,11 +410,11 @@ "clusters": { "type": "list", "member": { - "shape": "Sx" + "shape": "Sy" } }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -434,10 +440,10 @@ "type": "structure", "members": { "containerInstances": { - "shape": "S5o" + "shape": "S5z" }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -465,11 +471,11 @@ "services": { "type": "list", "member": { - "shape": "S1r" + "shape": "S24" } }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -492,7 +498,7 @@ "type": "structure", "members": { "taskDefinition": { - "shape": "S33" + "shape": "S3i" }, "tags": { "shape": "Sa" @@ -523,10 +529,10 @@ 
"type": "structure", "members": { "taskSets": { - "shape": "S1s" + "shape": "S25" }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -552,10 +558,10 @@ "type": "structure", "members": { "tasks": { - "shape": "S66" + "shape": "S6h" }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -572,7 +578,8 @@ "type": "structure", "members": { "endpoint": {}, - "telemetryEndpoint": {} + "telemetryEndpoint": {}, + "serviceConnectEndpoint": {} } } }, @@ -635,10 +642,10 @@ "type": "structure", "members": { "protectedTasks": { - "shape": "S6y" + "shape": "S79" }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -665,7 +672,7 @@ "settings": { "type": "list", "member": { - "shape": "S29" + "shape": "S2o" } }, "nextToken": {} @@ -693,7 +700,7 @@ "type": "structure", "members": { "attributes": { - "shape": "S2b" + "shape": "S2q" }, "nextToken": {} } @@ -765,6 +772,30 @@ } } }, + "ListServicesByNamespace": { + "input": { + "type": "structure", + "required": [ + "namespace" + ], + "members": { + "namespace": {}, + "nextToken": {}, + "maxResults": { + "type": "integer" + } + } + }, + "output": { + "type": "structure", + "members": { + "serviceArns": { + "shape": "Sr" + }, + "nextToken": {} + } + } + }, "ListTagsForResource": { "input": { "type": "structure", @@ -873,7 +904,7 @@ "type": "structure", "members": { "setting": { - "shape": "S29" + "shape": "S2o" } } } @@ -894,7 +925,7 @@ "type": "structure", "members": { "setting": { - "shape": "S29" + "shape": "S2o" } } } @@ -908,7 +939,7 @@ "members": { "cluster": {}, "attributes": { - "shape": "S2b" + "shape": "S2q" } } }, @@ -916,7 +947,7 @@ "type": "structure", "members": { "attributes": { - "shape": "S2b" + "shape": "S2q" } } } @@ -943,7 +974,7 @@ "type": "structure", "members": { "cluster": { - "shape": "Sx" + "shape": "Sy" } } } @@ -956,14 +987,14 @@ "instanceIdentityDocument": {}, "instanceIdentityDocumentSignature": {}, "totalResources": { - "shape": "S2t" + "shape": "S38" }, "versionInfo": { - "shape": "S2s" + "shape": "S37" }, "containerInstanceArn": {}, "attributes": { - "shape": "S2b" + "shape": "S2q" }, "platformDevices": { "type": "list", @@ -988,7 +1019,7 @@ "type": "structure", "members": { "containerInstance": { - "shape": "S2q" + "shape": "S35" } } } @@ -1006,16 +1037,16 @@ "executionRoleArn": {}, "networkMode": {}, "containerDefinitions": { - "shape": "S34" + "shape": "S3j" }, "volumes": { - "shape": "S4e" + "shape": "S4p" }, "placementConstraints": { - "shape": "S4s" + "shape": "S53" }, "requiresCompatibilities": { - "shape": "S4v" + "shape": "S56" }, "cpu": {}, "memory": {}, @@ -1025,16 +1056,16 @@ "pidMode": {}, "ipcMode": {}, "proxyConfiguration": { - "shape": "S54" + "shape": "S5f" }, "inferenceAccelerators": { - "shape": "S50" + "shape": "S5b" }, "ephemeralStorage": { - "shape": "S57" + "shape": "S5i" }, "runtimePlatform": { - "shape": "S4x" + "shape": "S58" } } }, @@ -1042,7 +1073,7 @@ "type": "structure", "members": { "taskDefinition": { - "shape": "S33" + "shape": "S3i" }, "tags": { "shape": "Sa" @@ -1073,16 +1104,16 @@ "group": {}, "launchType": {}, "networkConfiguration": { - "shape": "S1j" + "shape": "S1l" }, "overrides": { - "shape": "S6k" + "shape": "S6v" }, "placementConstraints": { - "shape": "S1d" + "shape": "S1f" }, "placementStrategy": { - "shape": "S1g" + "shape": "S1i" }, "platformVersion": {}, "propagateTags": {}, @@ -1098,10 +1129,10 @@ "type": "structure", "members": { "tasks": { - "shape": "S66" + "shape": "S6h" }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -1126,10 +1157,10 @@ }, 
"group": {}, "networkConfiguration": { - "shape": "S1j" + "shape": "S1l" }, "overrides": { - "shape": "S6k" + "shape": "S6v" }, "propagateTags": {}, "referenceId": {}, @@ -1144,10 +1175,10 @@ "type": "structure", "members": { "tasks": { - "shape": "S66" + "shape": "S6h" }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -1168,7 +1199,7 @@ "type": "structure", "members": { "task": { - "shape": "S67" + "shape": "S6i" } } } @@ -1182,7 +1213,7 @@ "members": { "cluster": {}, "attachments": { - "shape": "S89" + "shape": "S8m" } } }, @@ -1207,7 +1238,7 @@ }, "reason": {}, "networkBindings": { - "shape": "S6b" + "shape": "S6m" } } }, @@ -1238,7 +1269,7 @@ "type": "integer" }, "networkBindings": { - "shape": "S6b" + "shape": "S6m" }, "reason": {}, "status": {} @@ -1246,7 +1277,7 @@ } }, "attachments": { - "shape": "S89" + "shape": "S8m" }, "managedAgents": { "type": "list", @@ -1364,6 +1395,9 @@ }, "configuration": { "shape": "Sm" + }, + "serviceConnectDefaults": { + "shape": "Sw" } } }, @@ -1371,7 +1405,7 @@ "type": "structure", "members": { "cluster": { - "shape": "Sx" + "shape": "Sy" } } } @@ -1394,7 +1428,7 @@ "type": "structure", "members": { "cluster": { - "shape": "Sx" + "shape": "Sy" } } } @@ -1414,7 +1448,7 @@ "type": "structure", "members": { "containerInstance": { - "shape": "S2q" + "shape": "S35" } } } @@ -1438,10 +1472,10 @@ "type": "structure", "members": { "containerInstances": { - "shape": "S5o" + "shape": "S5z" }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -1463,16 +1497,16 @@ "shape": "Ss" }, "deploymentConfiguration": { - "shape": "S1b" + "shape": "S1d" }, "networkConfiguration": { - "shape": "S1j" + "shape": "S1l" }, "placementConstraints": { - "shape": "S1d" + "shape": "S1f" }, "placementStrategy": { - "shape": "S1g" + "shape": "S1i" }, "platformVersion": {}, "forceNewDeployment": { @@ -1488,11 +1522,14 @@ "type": "boolean" }, "loadBalancers": { - "shape": "S15" + "shape": "S17" }, "propagateTags": {}, "serviceRegistries": { - "shape": "S18" + "shape": "S1a" + }, + "serviceConnectConfiguration": { + "shape": "S1s" } } }, @@ -1500,7 +1537,7 @@ "type": "structure", "members": { "service": { - "shape": "S1r" + "shape": "S24" } } } @@ -1523,7 +1560,7 @@ "type": "structure", "members": { "taskSet": { - "shape": "S1t" + "shape": "S26" } } } @@ -1553,10 +1590,10 @@ "type": "structure", "members": { "protectedTasks": { - "shape": "S6y" + "shape": "S79" }, "failures": { - "shape": "S5d" + "shape": "S5o" } } } @@ -1575,7 +1612,7 @@ "service": {}, "taskSet": {}, "scale": { - "shape": "S1v" + "shape": "S28" } } }, @@ -1583,7 +1620,7 @@ "type": "structure", "members": { "taskSet": { - "shape": "S1t" + "shape": "S26" } } } @@ -1705,7 +1742,16 @@ } } }, - "Sx": { + "Sw": { + "type": "structure", + "required": [ + "namespace" + ], + "members": { + "namespace": {} + } + }, + "Sy": { "type": "structure", "members": { "clusterArn": {}, @@ -1729,7 +1775,7 @@ "statistics": { "type": "list", "member": { - "shape": "S10" + "shape": "S11" } }, "tags": { @@ -1745,19 +1791,25 @@ "shape": "Ss" }, "attachments": { - "shape": "S11" + "shape": "S12" }, - "attachmentsStatus": {} + "attachmentsStatus": {}, + "serviceConnectDefaults": { + "type": "structure", + "members": { + "namespace": {} + } + } } }, - "S10": { + "S11": { "type": "structure", "members": { "name": {}, "value": {} } }, - "S11": { + "S12": { "type": "list", "member": { "type": "structure", @@ -1768,13 +1820,13 @@ "details": { "type": "list", "member": { - "shape": "S10" + "shape": "S11" } } } } }, - "S15": { + "S17": { 
"type": "list", "member": { "type": "structure", @@ -1788,7 +1840,7 @@ } } }, - "S18": { + "S1a": { "type": "list", "member": { "type": "structure", @@ -1804,7 +1856,7 @@ } } }, - "S1b": { + "S1d": { "type": "structure", "members": { "deploymentCircuitBreaker": { @@ -1830,7 +1882,7 @@ } } }, - "S1d": { + "S1f": { "type": "list", "member": { "type": "structure", @@ -1840,7 +1892,7 @@ } } }, - "S1g": { + "S1i": { "type": "list", "member": { "type": "structure", @@ -1850,7 +1902,7 @@ } } }, - "S1j": { + "S1l": { "type": "structure", "members": { "awsvpcConfiguration": { @@ -1870,7 +1922,7 @@ } } }, - "S1n": { + "S1p": { "type": "structure", "required": [ "type" @@ -1879,17 +1931,94 @@ "type": {} } }, - "S1r": { + "S1s": { + "type": "structure", + "required": [ + "enabled" + ], + "members": { + "enabled": { + "type": "boolean" + }, + "namespace": {}, + "services": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "portName" + ], + "members": { + "portName": {}, + "discoveryName": {}, + "clientAliases": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "port" + ], + "members": { + "port": { + "type": "integer" + }, + "dnsName": {} + } + } + }, + "ingressPortOverride": { + "type": "integer" + } + } + } + }, + "logConfiguration": { + "shape": "S1y" + } + } + }, + "S1y": { + "type": "structure", + "required": [ + "logDriver" + ], + "members": { + "logDriver": {}, + "options": { + "type": "map", + "key": {}, + "value": {} + }, + "secretOptions": { + "shape": "S21" + } + } + }, + "S21": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "name", + "valueFrom" + ], + "members": { + "name": {}, + "valueFrom": {} + } + } + }, + "S24": { "type": "structure", "members": { "serviceArn": {}, "serviceName": {}, "clusterArn": {}, "loadBalancers": { - "shape": "S15" + "shape": "S17" }, "serviceRegistries": { - "shape": "S18" + "shape": "S1a" }, "status": {}, "desiredCount": { @@ -1909,10 +2038,10 @@ "platformFamily": {}, "taskDefinition": {}, "deploymentConfiguration": { - "shape": "S1b" + "shape": "S1d" }, "taskSets": { - "shape": "S1s" + "shape": "S25" }, "deployments": { "type": "list", @@ -1947,10 +2076,23 @@ "platformVersion": {}, "platformFamily": {}, "networkConfiguration": { - "shape": "S1j" + "shape": "S1l" }, "rolloutState": {}, - "rolloutStateReason": {} + "rolloutStateReason": {}, + "serviceConnectConfiguration": { + "shape": "S1s" + }, + "serviceConnectResources": { + "type": "list", + "member": { + "type": "structure", + "members": { + "discoveryName": {}, + "discoveryArn": {} + } + } + } } } }, @@ -1972,20 +2114,20 @@ "type": "timestamp" }, "placementConstraints": { - "shape": "S1d" + "shape": "S1f" }, "placementStrategy": { - "shape": "S1g" + "shape": "S1i" }, "networkConfiguration": { - "shape": "S1j" + "shape": "S1l" }, "healthCheckGracePeriodSeconds": { "type": "integer" }, "schedulingStrategy": {}, "deploymentController": { - "shape": "S1n" + "shape": "S1p" }, "tags": { "shape": "Sa" @@ -2000,13 +2142,13 @@ } } }, - "S1s": { + "S25": { "type": "list", "member": { - "shape": "S1t" + "shape": "S26" } }, - "S1t": { + "S26": { "type": "structure", "members": { "id": {}, @@ -2039,16 +2181,16 @@ "platformVersion": {}, "platformFamily": {}, "networkConfiguration": { - "shape": "S1j" + "shape": "S1l" }, "loadBalancers": { - "shape": "S15" + "shape": "S17" }, "serviceRegistries": { - "shape": "S18" + "shape": "S1a" }, "scale": { - "shape": "S1v" + "shape": "S28" }, "stabilityStatus": {}, "stabilityStatusAt": { @@ -2059,7 
+2201,7 @@ } } }, - "S1v": { + "S28": { "type": "structure", "members": { "value": { @@ -2068,7 +2210,7 @@ "unit": {} } }, - "S29": { + "S2o": { "type": "structure", "members": { "name": {}, @@ -2076,13 +2218,13 @@ "principalArn": {} } }, - "S2b": { + "S2q": { "type": "list", "member": { - "shape": "S2c" + "shape": "S2r" } }, - "S2c": { + "S2r": { "type": "structure", "required": [ "name" @@ -2094,7 +2236,7 @@ "targetId": {} } }, - "S2q": { + "S35": { "type": "structure", "members": { "containerInstanceArn": {}, @@ -2104,13 +2246,13 @@ "type": "long" }, "versionInfo": { - "shape": "S2s" + "shape": "S37" }, "remainingResources": { - "shape": "S2t" + "shape": "S38" }, "registeredResources": { - "shape": "S2t" + "shape": "S38" }, "status": {}, "statusReason": {}, @@ -2125,13 +2267,13 @@ }, "agentUpdateStatus": {}, "attributes": { - "shape": "S2b" + "shape": "S2q" }, "registeredAt": { "type": "timestamp" }, "attachments": { - "shape": "S11" + "shape": "S12" }, "tags": { "shape": "Sa" @@ -2160,7 +2302,7 @@ } } }, - "S2s": { + "S37": { "type": "structure", "members": { "agentVersion": {}, @@ -2168,7 +2310,7 @@ "dockerVersion": {} } }, - "S2t": { + "S38": { "type": "list", "member": { "type": "structure", @@ -2190,12 +2332,12 @@ } } }, - "S33": { + "S3i": { "type": "structure", "members": { "taskDefinitionArn": {}, "containerDefinitions": { - "shape": "S34" + "shape": "S3j" }, "family": {}, "taskRoleArn": {}, @@ -2205,36 +2347,36 @@ "type": "integer" }, "volumes": { - "shape": "S4e" + "shape": "S4p" }, "status": {}, "requiresAttributes": { "type": "list", "member": { - "shape": "S2c" + "shape": "S2r" } }, "placementConstraints": { - "shape": "S4s" + "shape": "S53" }, "compatibilities": { - "shape": "S4v" + "shape": "S56" }, "runtimePlatform": { - "shape": "S4x" + "shape": "S58" }, "requiresCompatibilities": { - "shape": "S4v" + "shape": "S56" }, "cpu": {}, "memory": {}, "inferenceAccelerators": { - "shape": "S50" + "shape": "S5b" }, "pidMode": {}, "ipcMode": {}, "proxyConfiguration": { - "shape": "S54" + "shape": "S5f" }, "registeredAt": { "type": "timestamp" @@ -2244,11 +2386,11 @@ }, "registeredBy": {}, "ephemeralStorage": { - "shape": "S57" + "shape": "S5i" } } }, - "S34": { + "S3j": { "type": "list", "member": { "type": "structure", @@ -2287,7 +2429,9 @@ "hostPort": { "type": "integer" }, - "protocol": {} + "protocol": {}, + "name": {}, + "appProtocol": {} } } }, @@ -2301,10 +2445,10 @@ "shape": "Sr" }, "environment": { - "shape": "S3a" + "shape": "S3q" }, "environmentFiles": { - "shape": "S3b" + "shape": "S3r" }, "mountPoints": { "type": "list", @@ -2396,7 +2540,7 @@ } }, "secrets": { - "shape": "S3q" + "shape": "S21" }, "dependsOn": { "type": "list", @@ -2485,21 +2629,7 @@ } }, "logConfiguration": { - "type": "structure", - "required": [ - "logDriver" - ], - "members": { - "logDriver": {}, - "options": { - "type": "map", - "key": {}, - "value": {} - }, - "secretOptions": { - "shape": "S3q" - } - } + "shape": "S1y" }, "healthCheck": { "type": "structure", @@ -2535,7 +2665,7 @@ } }, "resourceRequirements": { - "shape": "S47" + "shape": "S4i" }, "firelensConfiguration": { "type": "structure", @@ -2554,13 +2684,13 @@ } } }, - "S3a": { + "S3q": { "type": "list", "member": { - "shape": "S10" + "shape": "S11" } }, - "S3b": { + "S3r": { "type": "list", "member": { "type": "structure", @@ -2574,21 +2704,7 @@ } } }, - "S3q": { - "type": "list", - "member": { - "type": "structure", - "required": [ - "name", - "valueFrom" - ], - "members": { - "name": {}, - "valueFrom": {} - } - } - }, - "S47": { + 
"S4i": { "type": "list", "member": { "type": "structure", @@ -2602,7 +2718,7 @@ } } }, - "S4e": { + "S4p": { "type": "list", "member": { "type": "structure", @@ -2623,10 +2739,10 @@ }, "driver": {}, "driverOpts": { - "shape": "S4j" + "shape": "S4u" }, "labels": { - "shape": "S4j" + "shape": "S4u" } } }, @@ -2677,12 +2793,12 @@ } } }, - "S4j": { + "S4u": { "type": "map", "key": {}, "value": {} }, - "S4s": { + "S53": { "type": "list", "member": { "type": "structure", @@ -2692,18 +2808,18 @@ } } }, - "S4v": { + "S56": { "type": "list", "member": {} }, - "S4x": { + "S58": { "type": "structure", "members": { "cpuArchitecture": {}, "operatingSystemFamily": {} } }, - "S50": { + "S5b": { "type": "list", "member": { "type": "structure", @@ -2717,7 +2833,7 @@ } } }, - "S54": { + "S5f": { "type": "structure", "required": [ "containerName" @@ -2728,12 +2844,12 @@ "properties": { "type": "list", "member": { - "shape": "S10" + "shape": "S11" } } } }, - "S57": { + "S5i": { "type": "structure", "required": [ "sizeInGiB" @@ -2744,7 +2860,7 @@ } } }, - "S5d": { + "S5o": { "type": "list", "member": { "type": "structure", @@ -2755,26 +2871,26 @@ } } }, - "S5o": { + "S5z": { "type": "list", "member": { - "shape": "S2q" + "shape": "S35" } }, - "S66": { + "S6h": { "type": "list", "member": { - "shape": "S67" + "shape": "S6i" } }, - "S67": { + "S6i": { "type": "structure", "members": { "attachments": { - "shape": "S11" + "shape": "S12" }, "attributes": { - "shape": "S2b" + "shape": "S2q" }, "availabilityZone": {}, "capacityProviderName": {}, @@ -2801,7 +2917,7 @@ }, "reason": {}, "networkBindings": { - "shape": "S6b" + "shape": "S6m" }, "networkInterfaces": { "type": "list", @@ -2853,13 +2969,13 @@ "group": {}, "healthStatus": {}, "inferenceAccelerators": { - "shape": "S50" + "shape": "S5b" }, "lastStatus": {}, "launchType": {}, "memory": {}, "overrides": { - "shape": "S6k" + "shape": "S6v" }, "platformVersion": {}, "platformFamily": {}, @@ -2890,11 +3006,11 @@ "type": "long" }, "ephemeralStorage": { - "shape": "S57" + "shape": "S5i" } } }, - "S6b": { + "S6m": { "type": "list", "member": { "type": "structure", @@ -2910,7 +3026,7 @@ } } }, - "S6k": { + "S6v": { "type": "structure", "members": { "containerOverrides": { @@ -2923,10 +3039,10 @@ "shape": "Sr" }, "environment": { - "shape": "S3a" + "shape": "S3q" }, "environmentFiles": { - "shape": "S3b" + "shape": "S3r" }, "cpu": { "type": "integer" @@ -2938,7 +3054,7 @@ "type": "integer" }, "resourceRequirements": { - "shape": "S47" + "shape": "S4i" } } } @@ -2958,11 +3074,11 @@ "memory": {}, "taskRoleArn": {}, "ephemeralStorage": { - "shape": "S57" + "shape": "S5i" } } }, - "S6y": { + "S79": { "type": "list", "member": { "type": "structure", @@ -2977,7 +3093,7 @@ } } }, - "S89": { + "S8m": { "type": "list", "member": { "type": "structure", diff --git a/apis/ecs-2014-11-13.normal.json b/apis/ecs-2014-11-13.normal.json index ac6d19997a..6cfafe6a09 100644 --- a/apis/ecs-2014-11-13.normal.json +++ b/apis/ecs-2014-11-13.normal.json @@ -105,6 +105,9 @@ }, { "shape": "AccessDeniedException" + }, + { + "shape": "NamespaceNotFoundException" } ], "documentation": "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING
state, tasks for services that don't use a load balancer are considered healthy. If they're in the RUNNING
state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy. The default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state. This is while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.
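For illustration only: a sketch of CreateService opting in to the new Service Connect configuration that this release adds. The cluster, namespace, task definition, and port name are placeholders; the port name must match a named port mapping in the task definition.

```javascript
// Sketch only: create a service with Service Connect enabled.
const AWS = require('aws-sdk');
const ecs = new AWS.ECS();

ecs.createService({
  cluster: 'my-cluster',
  serviceName: 'orders',
  taskDefinition: 'orders-task:1',
  desiredCount: 2,
  serviceConnectConfiguration: {
    enabled: true,
    namespace: 'internal',                           // Cloud Map namespace (placeholder)
    services: [{
      portName: 'orders-http',                       // must match a port mapping name
      clientAliases: [{ port: 8080, dnsName: 'orders' }]
    }]
  }
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.service.serviceArn);
});
```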
" @@ -151,6 +154,9 @@ }, { "shape": "ServiceNotActiveException" + }, + { + "shape": "NamespaceNotFoundException" } ], "documentation": "Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
Runs a command remotely on a container within a task.
If you use a condition key in your IAM policy to refine the conditions for the policy statement, for example limit the actions to a specific cluster, you recevie an AccessDeniedException
when there is a mismatch between the condition key value and the corresponding parameter value.
Runs a command remotely on a container within a task.
If you use a condition key in your IAM policy to refine the conditions for the policy statement, for example limit the actions to a specific cluster, you receive an AccessDeniedException
when there is a mismatch between the condition key value and the corresponding parameter value.
Returns a list of services. You can filter the results by cluster, launch type, and scheduling strategy.
" }, + "ListServicesByNamespace": { + "name": "ListServicesByNamespace", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "ListServicesByNamespaceRequest" + }, + "output": { + "shape": "ListServicesByNamespaceResponse" + }, + "errors": [ + { + "shape": "ServerException" + }, + { + "shape": "ClientException" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "NamespaceNotFoundException" + } + ], + "documentation": "This operation lists all of the services that are associated with a Cloud Map namespace. This list might include services in different clusters. In contrast, ListServices
can only list services in one cluster at a time. If you need to filter the list of services in a single cluster by various parameters, use ListServices
. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
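A hedged sketch of the new ListServicesByNamespace operation described above, assuming the generated ECS client exposes it as listServicesByNamespace; the namespace name is a placeholder.

```javascript
// Sketch only: list every service attached to a Cloud Map namespace, across clusters.
const AWS = require('aws-sdk');
const ecs = new AWS.ECS();

async function servicesInNamespace(namespace) {
  const arns = [];
  let nextToken;
  do {
    const page = await ecs.listServicesByNamespace({ namespace, nextToken }).promise();
    arns.push(...(page.serviceArns || []));
    nextToken = page.nextToken;
  } while (nextToken);
  return arns;
}

servicesInNamespace('internal').then(console.log).catch(console.error);
```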
Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent doesn't interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.
The UpdateContainerAgent
API isn't supported for container instances using the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, you can update the ecs-init
package. This updates the agent. For more information, see Updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
The UpdateContainerAgent
API requires an Amazon ECS-optimized AMI or Amazon Linux AMI with the ecs-init
service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent doesn't interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.
The UpdateContainerAgent
API isn't supported for container instances using the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, you can update the ecs-init
package. This updates the agent. For more information, see Updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Agent updates with the UpdateContainerAgent
API operation do not apply to Windows container instances. We recommend that you launch new container instances to update the agent version in your Windows clusters.
The UpdateContainerAgent
API requires an Amazon ECS-optimized AMI or Amazon Linux AMI with the ecs-init
service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Modifies the parameters of a service.
For services using the rolling update (ECS
) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount
parameter.
If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest
), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment
option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent
and maximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during a deployment. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
and a 30-second timeout. After this, SIGKILL
is sent and the containers are forcibly stopped. If the container handles the SIGTERM
gracefully and exits within 30 seconds from receiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties. If you specified a custom IAM role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide.
loadBalancers,
serviceRegistries
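For illustration only: a minimal sketch of the rolling-update behavior described above, forcing a new deployment without changing the task definition and relying on the deployment configuration limits. Cluster and service names are placeholders.

```javascript
// Sketch only: roll the service onto fresh tasks using forceNewDeployment.
const AWS = require('aws-sdk');
const ecs = new AWS.ECS();

ecs.updateService({
  cluster: 'my-cluster',
  service: 'orders',
  forceNewDeployment: true,
  deploymentConfiguration: { minimumHealthyPercent: 100, maximumPercent: 200 }
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.service.deployments);
});
```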
Updates the protection status of a task. You can set protectionEnabled
to true
to protect your task from termination during scale-in events from Service Autoscaling or deployments.
Task-protection, by default, expires after 2 hours at which point Amazon ECS unsets the protectionEnabled
property making the task eligible for termination by a subsequent scale-in event.
You can specify a custom expiration period for task protection from 1 minute to up to 2,880 minutes (48 hours). To specify the custom expiration period, set the expiresInMinutes
property. The expiresInMinutes
property is always reset when you invoke this operation for a task that already has protectionEnabled
set to true
. You can keep extending the protection expiration period of a task by invoking this operation repeatedly.
To learn more about Amazon ECS task protection, see Task scale-in protection in the Amazon Elastic Container Service Developer Guide.
This operation is only supported for tasks belonging to an Amazon ECS service. Invoking this operation for a standalone task will result in an TASK_NOT_VALID
failure. For more information, see API failure reasons.
If you prefer to set task protection from within the container, we recommend using the Amazon ECS container agent endpoint.
Updates the protection status of a task. You can set protectionEnabled
to true
to protect your task from termination during scale-in events from Service Autoscaling or deployments.
Task-protection, by default, expires after 2 hours at which point Amazon ECS unsets the protectionEnabled
property making the task eligible for termination by a subsequent scale-in event.
You can specify a custom expiration period for task protection from 1 minute to up to 2,880 minutes (48 hours). To specify the custom expiration period, set the expiresInMinutes
property. The expiresInMinutes
property is always reset when you invoke this operation for a task that already has protectionEnabled
set to true
. You can keep extending the protection expiration period of a task by invoking this operation repeatedly.
To learn more about Amazon ECS task protection, see Task scale-in protection in the Amazon Elastic Container Service Developer Guide.
This operation is only supported for tasks belonging to an Amazon ECS service. Invoking this operation for a standalone task will result in a TASK_NOT_VALID
failure. For more information, see API failure reasons.
If you prefer to set task protection from within the container, we recommend using the Task scale-in protection endpoint.
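A minimal sketch of the operation described above, using the AWS SDK for JavaScript v2; the cluster name and task ARN are hypothetical placeholders.

```js
// Protect a running service task from scale-in for 60 minutes; omitting
// expiresInMinutes falls back to the default 2-hour protection window.
const AWS = require('aws-sdk');
const ecs = new AWS.ECS({ region: 'us-east-1' });

ecs.updateTaskProtection({
  cluster: 'my-cluster',                                                  // placeholder
  tasks: ['arn:aws:ecs:us-east-1:111122223333:task/my-cluster/0123abcd'], // placeholder
  protectionEnabled: true,
  expiresInMinutes: 60                                                    // 1 to 2,880 minutes
}).promise()
  .then(data => console.log(data.protectedTasks))
  .catch(err => console.error(err));
```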
The value of the attribute. The value
must contain between 1 and 128 characters. It can contain letters (uppercase and lowercase), numbers, hyphens (-), underscores (_), periods (.), at signs (@), forward slashes (/), back slashes (\\), colons (:), or spaces. The value can't can't start or end with a space.
The value of the attribute. The value
must contain between 1 and 128 characters. It can contain letters (uppercase and lowercase), numbers, hyphens (-), underscores (_), periods (.), at signs (@), forward slashes (/), back slashes (\\), colons (:), or spaces. The value can't start or end with a space.
The status of the capacity providers associated with the cluster. The following are the states that are returned.
The available capacity providers for the cluster are updating.
The capacity providers have successfully updated.
The capacity provider updates failed.
Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the enabled
parameter to true
in the ServiceConnectConfiguration
. You can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default parameter.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } }, "documentation": "A regional grouping of one or more container instances where you can run task requests. Each account receives a default cluster the first time you use the Amazon ECS service, but you may also create other clusters. Clusters may contain more than one instance type simultaneously.
" @@ -2024,6 +2073,29 @@ "shape": "ClusterField" } }, + "ClusterServiceConnectDefaults": { + "type": "structure", + "members": { + "namespace": { + "shape": "String", + "documentation": "The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace. When you create a service and don't specify a Service Connect configuration, this namespace is used.
" + } + }, + "documentation": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the enabled
parameter to true
in the ServiceConnectConfiguration
. You can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default parameter.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + }, + "ClusterServiceConnectDefaultsRequest": { + "type": "structure", + "required": [ + "namespace" + ], + "members": { + "namespace": { + "shape": "String", + "documentation": "The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify a Service Connect configuration. Up to 1024 characters are allowed. The name is case-sensitive. The characters can't include hyphens (-), tilde (~), greater than (>), less than (<), or slash (/).
If you enter an existing namespace name or ARN, then that namespace will be used. Any namespace type is supported. The namespace must be in this account and this Amazon Web Services Region.
If you enter a new name, a Cloud Map namespace will be created. Amazon ECS creates a Cloud Map namespace with the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\" namespace type in the Command Line Interface. Other types of instance discovery aren't used by Service Connect.
If you update the service with an empty string \"\"
for the namespace name, the cluster configuration for Service Connect is removed. Note that the namespace will remain in Cloud Map and must be deleted separately.
For more information about Cloud Map, see Working with Services in the Cloud Map Developer Guide.
" + } + }, + "documentation": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the enabled
parameter to true
in the ServiceConnectConfiguration
. You can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default parameter.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
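A hedged sketch of setting this cluster-level default from the AWS SDK for JavaScript v2; 'internal' and 'my-cluster' are hypothetical names.

```js
const AWS = require('aws-sdk');
const ecs = new AWS.ECS();

// Create a cluster whose new Service Connect services join the 'internal'
// namespace by default; if the namespace doesn't exist, ECS creates it in Cloud Map.
ecs.createCluster({
  clusterName: 'my-cluster',                          // placeholder
  serviceConnectDefaults: { namespace: 'internal' }   // hypothetical namespace
}).promise()
  .then(data => console.log(data.cluster.serviceConnectDefaults))
  .catch(console.error);
```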
" + }, "ClusterSetting": { "type": "structure", "members": { @@ -2297,7 +2369,7 @@ }, "ulimits": { "shape": "UlimitList", - "documentation": "A list of ulimits
to set in the container. If a ulimit value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits
in the Create a container section of the Docker Remote API and the --ulimit
option to docker run. Valid naming values are displayed in the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 1024
and hard limit is 4096
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
This parameter is not supported for Windows containers.
A list of ulimits
to set in the container. If a ulimit
value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits
in the Create a container section of the Docker Remote API and the --ulimit
option to docker run. Valid naming values are displayed in the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 1024
and hard limit is 4096
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
This parameter is not supported for Windows containers.
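For illustration, a container definition fragment that overrides the nofile limits described above; the family, container name, and image are placeholders.

```js
// Raise the open-file limits for one container; on Fargate, nofile is the
// resource limit that can be overridden this way.
const params = {
  family: 'web',                                          // placeholder
  containerDefinitions: [{
    name: 'app',                                          // placeholder
    image: 'public.ecr.aws/nginx/nginx:latest',           // placeholder
    ulimits: [{ name: 'nofile', softLimit: 65535, hardLimit: 65535 }]
  }]
};
// new AWS.ECS().registerTaskDefinition(params).promise();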
The capacity provider strategy to set as the default for the cluster. After a default capacity provider strategy is set for a cluster, when you call the RunTask or CreateService APIs with no capacity provider strategy or launch type specified, the default capacity provider strategy for the cluster is used.
If a default capacity provider strategy isn't defined for a cluster when it was created, it can be defined later with the PutClusterCapacityProviders API operation.
" + }, + "serviceConnectDefaults": { + "shape": "ClusterServiceConnectDefaultsRequest", + "documentation": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the enabled
parameter to true
in the ServiceConnectConfiguration
. You can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default parameter.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } }, @@ -2699,7 +2775,7 @@ }, "healthCheckGracePeriodSeconds": { "shape": "BoxedInteger", - "documentation": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of 0
is used.
If you do not use an Elastic Load Balancing, we recomend that you use the startPeriod
in the task definition healtch check parameters. For more information, see Health check.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
" + "documentation": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of 0
is used.
If you do not use Elastic Load Balancing, we recommend that you use the startPeriod
in the task definition health check parameters. For more information, see Health check.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
" }, "schedulingStrategy": { "shape": "SchedulingStrategy", @@ -2724,6 +2800,10 @@ "enableExecuteCommand": { "shape": "Boolean", "documentation": "Determines whether the execute command functionality is enabled for the service. If true
, this enables execute command functionality on all containers in the service tasks.
The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } }, @@ -3029,6 +3109,14 @@ "rolloutStateReason": { "shape": "String", "documentation": "A description of the rollout state of a deployment.
" + }, + "serviceConnectConfiguration": { + "shape": "ServiceConnectConfiguration", + "documentation": "The details of the Service Connect configuration that's used by this deployment. Compare the configuration between multiple deployments when troubleshooting issues with new deployments.
The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + }, + "serviceConnectResources": { + "shape": "ServiceConnectServiceResourceList", + "documentation": "The list of Service Connect resources that are associated with this deployment. Each list entry maps a discovery name to a Cloud Map service name.
" } }, "documentation": "The details of an Amazon ECS service deployment. This is used only when a service uses the ECS
deployment controller type.
Determines whether to include additional information about the clusters in the response. If this field is omitted, this information isn't included.
If ATTACHMENTS
is specified, the attachments for the container instances or tasks within the cluster are included.
If SETTINGS
is specified, the settings for the cluster are included.
If CONFIGURATIONS
is specified, the configuration for the cluster is included.
If STATISTICS
is specified, the task and service count is included, separated by launch type.
If TAGS
is specified, the metadata tags associated with the cluster are included.
Determines whether to include additional information about the clusters in the response. If this field is omitted, this information isn't included.
If ATTACHMENTS
is specified, the attachments for the container instances or tasks within the cluster are included, for example the capacity providers.
If SETTINGS
is specified, the settings for the cluster are included.
If CONFIGURATIONS
is specified, the configuration for the cluster is included.
If STATISTICS
is specified, the task and service count is included, separated by launch type.
If TAGS
is specified, the metadata tags associated with the cluster are included.
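A small sketch of requesting those optional fields with the AWS SDK for JavaScript v2; the cluster name is a placeholder.

```js
const AWS = require('aws-sdk');
const ecs = new AWS.ECS();

// Ask for every optional detail block listed above in one call.
ecs.describeClusters({
  clusters: ['my-cluster'],
  include: ['ATTACHMENTS', 'SETTINGS', 'CONFIGURATIONS', 'STATISTICS', 'TAGS']
}).promise()
  .then(data => console.log(JSON.stringify(data.clusters, null, 2)))
  .catch(console.error);
```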
The telemetry endpoint for the Amazon ECS agent.
" + }, + "serviceConnectEndpoint": { + "shape": "String", + "documentation": "The endpoint for the Amazon ECS agent to poll for Service Connect configuration. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } }, @@ -3874,7 +3966,7 @@ "documentation": "The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the startPeriod
is disabled.
If a health check succeeds within the startPeriod
, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.
An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).
The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.
You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.
The following describes the possible healthStatus
values for a container:
HEALTHY
-The container health check has passed successfully.
UNHEALTHY
-The container health check has failed.
UNKNOWN
-The container health check is being evaluated or there's no container health check defined.
The following describes the possible healthStatus
values for a task. The container health check status of nonessential containers do not have an effect on the health status of a task.
HEALTHY
-All essential containers within the task have passed their health checks.
UNHEALTHY
-One or more essential containers have failed their health check.
UNKNOWN
-The essential containers within the task are still having their health checks evaluated or there are no container health checks defined.
If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.
The following are notes about container health check support:
Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.
Container health checks are supported for Fargate tasks if you're using platform version 1.1.0
or greater. For more information, see Fargate platform versions.
Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.
An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile).
The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.
You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.
The following describes the possible healthStatus
values for a container:
HEALTHY
-The container health check has passed successfully.
UNHEALTHY
-The container health check has failed.
UNKNOWN
-The container health check is being evaluated or there's no container health check defined.
The following describes the possible healthStatus
values for a task. The container health check status of nonessential containers only affects the health status of a task if no essential containers have health checks defined.
HEALTHY
-All essential containers within the task have passed their health checks.
UNHEALTHY
-One or more essential containers have failed their health check.
UNKNOWN
-The essential containers within the task are still having their health checks evaluated or there are only nonessential containers with health checks defined.
If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.
For tasks that are a part of a service and the service uses the ECS
rolling deployment type, the deployment is paused while the new tasks have the UNKNOWN
task health check status. For example, tasks that define health checks for nonessential containers when no essential containers have health checks will have the UNKNOWN
health check status indefinitely which prevents the deployment from completing.
The following are notes about container health check support:
Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.
Container health checks are supported for Fargate tasks if you're using platform version 1.1.0
or greater. For more information, see Fargate platform versions.
Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.
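A sketch of the container-level health check parameters described above; the command and timings are illustrative, not recommendations.

```js
// One container definition with a Docker-style health check. A container is
// reported UNHEALTHY after `retries` consecutive failures once startPeriod has elapsed.
const containerDefinition = {
  name: 'web',                      // placeholder
  image: 'my-image:latest',         // placeholder
  essential: true,
  healthCheck: {
    command: ['CMD-SHELL', 'curl -f http://localhost/ || exit 1'],
    interval: 30,                   // seconds between checks
    timeout: 5,                     // seconds before a single check fails
    retries: 3,                     // consecutive failures allowed
    startPeriod: 60                 // optional bootstrap grace period (0-300 seconds)
  }
};
```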
The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace to list the services in.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + }, + "nextToken": { + "shape": "String", + "documentation": "The nextToken
value that's returned from a ListServicesByNamespace
request. It indicates that more results are available to fulfill the request and further calls are needed. If maxResults
is returned, it is possible the number of results is less than maxResults
.
The maximum number of service results that ListServicesByNamespace
returns in paginated output. When this parameter is used, ListServicesByNamespace
only returns maxResults
results in a single page along with a nextToken
response element. The remaining results of the initial request can be seen by sending another ListServicesByNamespace
request with the returned nextToken
value. This value can be between 1 and 100. If this parameter isn't used, then ListServicesByNamespace
returns up to 10 results and a nextToken
value if applicable.
The list of full ARN entries for each service that's associated with the specified namespace.
" + }, + "nextToken": { + "shape": "String", + "documentation": "The nextToken
value to include in a future ListServicesByNamespace
request. When the results of a ListServicesByNamespace
request exceed maxResults
, this value can be used to retrieve the next page of results. When there are no more results to return, this value is null
.
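A sketch of draining the paginated results with the AWS SDK for JavaScript v2; 'internal' is a hypothetical namespace name.

```js
const AWS = require('aws-sdk');
const ecs = new AWS.ECS();

// Collect every service ARN in the namespace by following nextToken.
async function listAllServices(namespace) {
  const serviceArns = [];
  let nextToken;
  do {
    const page = await ecs
      .listServicesByNamespace({ namespace, maxResults: 100, nextToken })
      .promise();
    serviceArns.push(...page.serviceArns);
    nextToken = page.nextToken;
  } while (nextToken);
  return serviceArns;
}

listAllServices('internal').then(console.log).catch(console.error);
```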
The startedBy
value to filter the task results with. Specifying a startedBy
value limits the results to tasks that were started with that value.
The startedBy
value to filter the task results with. Specifying a startedBy
value limits the results to tasks that were started with that value.
When you specify startedBy
as the filter, it must be the only filter that you use.
The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 10000
is used.
The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 1
is used.
The protocol used for the port mapping. Valid values are tcp
and udp
. The default is tcp
.
The name that's used for the port mapping. This parameter only applies to Service Connect. This parameter is the name that you use in the serviceConnectConfiguration
of a service. Up to 64 characters are allowed. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). A hyphen can't be the first character.
For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + }, + "appProtocol": { + "shape": "ApplicationProtocol", + "documentation": "The application protocol that's used for the port mapping. This parameter only applies to Service Connect. We recommend that you set this parameter to be consistent with the protocol that your application uses. If you set this parameter, Amazon ECS adds protocol-specific connection handling to the Service Connect proxy. If you set this parameter, Amazon ECS adds protocol-specific telemetry in the Amazon ECS console and CloudWatch.
If you don't set a value for this parameter, then TCP is used. However, Amazon ECS doesn't add protocol-specific telemetry for TCP.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
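An illustrative port mapping that names the port for Service Connect; the http/http2/grpc values are assumed from the ApplicationProtocol enum.

```js
// Named port mapping, referenced later by portName in serviceConnectConfiguration.
const portMappings = [{
  name: 'api',              // placeholder; reused as portName by Service Connect
  containerPort: 8080,
  protocol: 'tcp',
  appProtocol: 'http'       // assumed values: 'http' | 'http2' | 'grpc'
}];
```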
" } }, "documentation": "Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.
If you use containers in a task with the awsvpc
or host
network mode, specify the exposed ports using containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
You can't expose the same container port for multiple protocols. If you attempt this, an error is returned.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the networkBindings
section of DescribeTasks API responses.
The CPU architecture.
You can run your Linux tasks on an ARM-based platform by setting the value to ARM64
. This option is avaiable for tasks that run on Linux Amazon EC2 instance or Linux containers on Fargate.
The CPU architecture.
You can run your Linux tasks on an ARM-based platform by setting the value to ARM64
. This option is available for tasks that run on Linux Amazon EC2 instances or Linux containers on Fargate.
Details on a service within a cluster
" }, + "ServiceConnectClientAlias": { + "type": "structure", + "required": [ + "port" + ], + "members": { + "port": { + "shape": "PortNumber", + "documentation": "The listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace.
To avoid changing your applications in client Amazon ECS services, set this to the same port that the client application uses by default. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + }, + "dnsName": { + "shape": "String", + "documentation": "The dnsName
is the name that you use in the applications of client tasks to connect to this service. The name must be a valid DNS name but doesn't need to be fully-qualified. Up to 127 characters are allowed. The characters can include lowercase letters, numbers, underscores (_), hyphens (-), and periods (.). A hyphen can't be the first character.
If this parameter isn't specified, the default value of discoveryName.namespace
is used. If the discoveryName
isn't specified, the portName.namespace
from the task definition is used.
To avoid changing your applications in client Amazon ECS services, set this to the same name that the client application uses by default. For example, a few common names are database
, db
, or the lowercase name of a database, such as mysql
or redis
. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
Each alias (\"endpoint\") is a fully-qualified name and port number that other tasks (\"clients\") can use to connect to this service.
Each name and port mapping must be unique within the namespace.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + }, + "ServiceConnectClientAliasList": { + "type": "list", + "member": { + "shape": "ServiceConnectClientAlias" + } + }, + "ServiceConnectConfiguration": { + "type": "structure", + "required": [ + "enabled" + ], + "members": { + "enabled": { + "shape": "Boolean", + "documentation": "Specifies whether to use Service Connect with this service.
" + }, + "namespace": { + "shape": "String", + "documentation": "The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace for use with Service Connect. The namespace must be in the same Amazon Web Services Region as the Amazon ECS service and cluster. The type of namespace doesn't affect Service Connect. For more information about Cloud Map, see Working with Services in the Cloud Map Developer Guide.
" + }, + "services": { + "shape": "ServiceConnectServiceList", + "documentation": "The list of Service Connect service objects. These are names and aliases (also known as endpoints) that are used by other Amazon ECS services to connect to this service. You can specify up to X (30?) objects per Amazon ECS service.
This field is not required for a \"client\" Amazon ECS service that's a member of a namespace only to connect to other services within the namespace. An example of this would be a frontend application that accepts incoming requests from either a load balancer that's attached to the service or by other means.
An object selects a port from the task definition, assigns a name for the Cloud Map service, and a list of aliases (endpoints) and ports for client applications to refer to this service.
" + }, + "logConfiguration": { + "shape": "LogConfiguration" + } + }, + "documentation": "The Service Connect configuration of your Amazon ECS service. The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + }, + "ServiceConnectService": { + "type": "structure", + "required": [ + "portName" + ], + "members": { + "portName": { + "shape": "String", + "documentation": "The portName
must match the name of one of the portMappings
from all the containers in the task definition of this Amazon ECS service.
The discoveryName
is the name of the new Cloud Map service that Amazon ECS creates for this Amazon ECS service. This must be unique within the Cloud Map namespace. Up to 64 characters are allowed. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). A hyphen can't be the first character.
If this field isn't specified, portName
is used.
The list of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. The maximum number of client aliases that you can have in this list is 1.
Each alias (\"endpoint\") is a fully-qualified name and port number that other Amazon ECS tasks (\"clients\") can use to connect to this service.
Each name and port mapping must be unique within the namespace.
For each ServiceConnectService
, you must provide at least one clientAlias
with one port
.
The port number for the Service Connect proxy to listen on.
Use the value of this field to bypass the proxy for traffic on the port number specified in the named portMapping
in the task definition of this application, and then use it in your VPC security groups to allow traffic into the proxy for this Amazon ECS service.
In awsvpc
mode and Fargate, the default value is the container port number. The container port number is in the portMapping
in the task definition. In bridge mode, the default value is the ephemeral port of the Service Connect proxy.
The Service Connect service object configuration. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + }, + "ServiceConnectServiceList": { + "type": "list", + "member": { + "shape": "ServiceConnectService" + } + }, + "ServiceConnectServiceResource": { + "type": "structure", + "members": { + "discoveryName": { + "shape": "String", + "documentation": "The discovery name of this Service Connect resource.
The discoveryName
is the name of the new Cloud Map service that Amazon ECS creates for this Amazon ECS service. This must be unique within the Cloud Map namespace. Up to 64 characters are allowed. The characters can include lowercase letters, numbers, underscores (_), and hyphens (-). A hyphen can't be the first character.
If this field isn't specified, portName
is used.
The Amazon Resource Name (ARN) for the namespace in Cloud Map that matches the discovery name for this Service Connect resource. You can use this ARN in other integrations with Cloud Map. However, Service Connect can't ensure connectivity outside of Amazon ECS.
" + } + }, + "documentation": "The Service Connect resource. Each configuration maps a discovery name to a Cloud Map service name. The data is stored in Cloud Map as part of the Service Connect configuration for each discovery name of this Amazon ECS service.
A task can resolve the dnsName
for each of the clientAliases
of a service. However a task can't resolve the discovery names. If you want to connect to a service, refer to the ServiceConnectConfiguration
of that service for the list of clientAliases
that you can use.
The cpu override for the task.
" + "documentation": "The CPU override for the task.
" }, "inferenceAcceleratorOverrides": { "shape": "InferenceAcceleratorOverrides", @@ -6383,7 +6620,7 @@ }, "startedBy": { "shape": "String", - "documentation": "The tag specified when a task set is started. If an CodeDeploy deployment created the task set, the startedBy
parameter is CODE_DEPLOY
. If an external deployment created the task set, the startedBy field isn't used.
The tag specified when a task set is started. If an CodeDeploy deployment created the task set, the startedBy
parameter is CODE_DEPLOY
. If an external deployment created the task set, the startedBy
field isn't used.
The stability status. This indicates whether the task set has reached a steady state. If the following conditions are met, the task set sre in STEADY_STATE
:
The task runningCount
is equal to the computedDesiredCount
.
The pendingCount
is 0
.
There are no tasks that are running on container instances in the DRAINING
status.
All tasks are reporting a healthy status from the load balancers, service discovery, and container health checks.
If any of those conditions aren't met, the stability status returns STABILIZING
.
The stability status. This indicates whether the task set has reached a steady state. If the following conditions are met, the task set are in STEADY_STATE
:
The task runningCount
is equal to the computedDesiredCount
.
The pendingCount
is 0
.
There are no tasks that are running on container instances in the DRAINING
status.
All tasks are reporting a healthy status from the load balancers, service discovery, and container health checks.
If any of those conditions aren't met, the stability status returns STABILIZING
.
The soft limit for the ulimit type.
" + "documentation": "The soft limit for the ulimit
type.
The hard limit for the ulimit type.
" + "documentation": "The hard limit for the ulimit
type.
The ulimit
settings to pass to the container.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 1024
and hard limit is 4096
.
The execute command configuration for the cluster.
" + }, + "serviceConnectDefaults": { + "shape": "ClusterServiceConnectDefaultsRequest", + "documentation": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the enabled
parameter to true
in the ServiceConnectConfiguration
. You can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default parameter.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } }, @@ -6672,7 +6916,7 @@ }, "settings": { "shape": "ClusterSettings", - "documentation": "The setting to use by default for a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster. If this value is specified, it overrides the containerInsights
value set with PutAccountSetting or PutAccountSettingDefault.
The setting to use by default for a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster. If this value is specified, it overrides the containerInsights
value set with PutAccountSetting or PutAccountSettingDefault.
Currently, if you delete an existing cluster that does not have Container Insights turned on, and then create a new cluster with the same name with Container Insights turned on, Container Insights will not actually be turned on. If you want to preserve the same name for your existing cluster and turn on Container Insights, you must wait 7 days before you can re-create it.
etails about the task set.
" + "documentation": "The details about the task set.
" } } }, @@ -6848,6 +7092,10 @@ "serviceRegistries": { "shape": "ServiceRegistries", "documentation": "The details for the service discovery registries to assign to this service. For more information, see Service Discovery.
When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks with the updated service registries configuration, and then stops the old tasks when the new tasks are running.
You can remove existing serviceRegistries
by passing an empty list.
The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.
Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } }, diff --git a/apis/ecs-2014-11-13.paginators.json b/apis/ecs-2014-11-13.paginators.json index 16fa7ccaba..7c738668fc 100644 --- a/apis/ecs-2014-11-13.paginators.json +++ b/apis/ecs-2014-11-13.paginators.json @@ -30,6 +30,12 @@ "output_token": "nextToken", "result_key": "serviceArns" }, + "ListServicesByNamespace": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "serviceArns" + }, "ListTaskDefinitionFamilies": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/apis/elasticfilesystem-2015-02-01.normal.json b/apis/elasticfilesystem-2015-02-01.normal.json index 9b7fd734ba..b4b506ea53 100644 --- a/apis/elasticfilesystem-2015-02-01.normal.json +++ b/apis/elasticfilesystem-2015-02-01.normal.json @@ -47,7 +47,7 @@ "shape": "ThrottlingException" } ], - "documentation": "Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.
This operation requires permissions for the elasticfilesystem:CreateAccessPoint
action.
Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.
If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of 120 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.
This operation requires permissions for the elasticfilesystem:CreateAccessPoint
action.
Specifies the throughput mode for the file system, either bursting
or provisioned
. If you set ThroughputMode
to provisioned
, you must also set a value for ProvisionedThroughputInMibps
. After you create the file system, you can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes, as long as it’s been more than 24 hours since the last decrease or throughput mode change. For more information, see Specifying throughput with provisioned mode in the Amazon EFS User Guide.
Default is bursting
.
Specifies the throughput mode for the file system. The mode can be bursting
, provisioned
, or elastic
. If you set ThroughputMode
to provisioned
, you must also set a value for ProvisionedThroughputInMibps
. After you create the file system, you can decrease your file system's throughput in Provisioned Throughput mode or change between the throughput modes, with certain time restrictions. For more information, see Specifying throughput with provisioned mode in the Amazon EFS User Guide.
Default is bursting
.
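A brief sketch of using the new elastic mode from the AWS SDK for JavaScript v2; the file system ID is a placeholder.

```js
const AWS = require('aws-sdk');
const efs = new AWS.EFS();

// Create a new file system in elastic throughput mode ...
efs.createFileSystem({ ThroughputMode: 'elastic' }).promise()
  .then(fs => console.log(fs.FileSystemId))
  .catch(console.error);

// ... or switch an existing one, subject to the time restrictions noted above.
// efs.updateFileSystem({ FileSystemId: 'fs-0123456789abcdef0', ThroughputMode: 'elastic' }).promise();
```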
Attaches the specified principal to the specified thing. A principal can be X.509 certificates, IAM users, groups, and roles, Amazon Cognito identities or federated identities.
Requires permission to access the AttachThingPrincipal action.
" + "documentation": "Attaches the specified principal to the specified thing. A principal can be X.509 certificates, Amazon Cognito identities or federated identities.
Requires permission to access the AttachThingPrincipal action.
" }, "CancelAuditMitigationActionsTask": { "name": "CancelAuditMitigationActionsTask", @@ -9802,6 +9802,16 @@ "max": 10, "min": 1 }, + "ContentType": { + "type": "string", + "max": 1024, + "min": 0 + }, + "CorrelationData": { + "type": "string", + "max": 1024, + "min": 0 + }, "Count": { "type": "integer" }, @@ -10317,6 +10327,10 @@ "documentParameters": { "shape": "ParameterMap", "documentation": "Parameters of an Amazon Web Services managed template that you can specify to create the job document.
documentParameters
can only be used when creating jobs from Amazon Web Services managed templates. This parameter can't be used with custom job templates or to create jobs from them.
The configuration that allows you to schedule a job for a future date and time in addition to specifying the end behavior for each job execution.
" } } }, @@ -15160,6 +15174,10 @@ "isConcurrent": { "shape": "BooleanWrapperObject", "documentation": "Indicates whether a job is concurrent. Will be true when a job is rolling out new job executions or canceling previously created executions, otherwise false.
" + }, + "schedulingConfig": { + "shape": "SchedulingConfig", + "documentation": "The configuration that allows you to schedule a job for a future date and time in addition to specifying the end behavior for each job execution.
" } }, "documentation": "The Job
object contains details about a job.
Specifies the MQTT context to use for the test authorizer request
" }, + "MqttHeaders": { + "type": "structure", + "members": { + "payloadFormatIndicator": { + "shape": "PayloadFormatIndicator", + "documentation": "An Enum
string value that indicates whether the payload is formatted as UTF-8.
Valid values are UNSPECIFIED_BYTES
and UTF8_DATA
.
For more information, see Payload Format Indicator from the MQTT Version 5.0 specification.
Supports substitution templates.
" + }, + "contentType": { + "shape": "ContentType", + "documentation": "A UTF-8 encoded string that describes the content of the publishing message.
For more information, see Content Type from the MQTT Version 5.0 specification.
Supports substitution templates.
" + }, + "responseTopic": { + "shape": "ResponseTopic", + "documentation": "A UTF-8 encoded string that's used as the topic name for a response message. The response topic is used to describe the topic which the receiver should publish to as part of the request-response flow. The topic must not contain wildcard characters.
For more information, see Response Topic from the MQTT Version 5.0 specification.
Supports substitution templates.
" + }, + "correlationData": { + "shape": "CorrelationData", + "documentation": "The base64-encoded binary data used by the sender of the request message to identify which request the response message is for when it's received.
For more information, see Correlation Data from the MQTT Version 5.0 specification.
This binary data must be based64-encoded.
Supports substitution templates.
" + }, + "messageExpiry": { + "shape": "MessageExpiry", + "documentation": "A user-defined integer value that will persist a message at the message broker for a specified amount of time to ensure that the message will expire if it's no longer relevant to the subscriber. The value of messageExpiry
represents the number of seconds before it expires. For more information about the limits of messageExpiry
, see Amazon Web Services IoT Core message broker and protocol limits and quotas from the Amazon Web Services Reference Guide.
Supports substitution templates.
" + }, + "userProperties": { + "shape": "UserProperties", + "documentation": "An array of key-value pairs that you define in the MQTT5 header.
" + } + }, + "documentation": "Specifies MQTT Version 5.0 headers information. For more information, see MQTT from Amazon Web Services IoT Core Developer Guide.
" + }, "MqttPassword": { "type": "blob", "max": 65535, @@ -18573,8 +18635,6 @@ }, "NamespaceId": { "type": "string", - "max": 64, - "min": 1, "pattern": "[a-zA-Z0-9_-]+" }, "NextToken": { @@ -18911,6 +18971,11 @@ "PayloadField": { "type": "string" }, + "PayloadFormatIndicator": { + "type": "string", + "max": 1024, + "min": 0 + }, "PayloadVersion": { "type": "string", "max": 32, @@ -19761,6 +19826,10 @@ "qos": { "shape": "Qos", "documentation": "The Quality of Service (QoS) level to use when republishing messages. The default value is 0.
" + }, + "headers": { + "shape": "MqttHeaders", + "documentation": "MQTT Version 5.0 headers information. For more information, see MQTT from the Amazon Web Services IoT Core Developer Guide.
" } }, "documentation": "Describes an action to republish to another topic.
" @@ -19857,6 +19926,11 @@ "shape": "Resource" } }, + "ResponseTopic": { + "type": "string", + "max": 1024, + "min": 0 + }, "RetryAttempt": { "type": "integer" }, @@ -20115,6 +20189,24 @@ "min": 1, "pattern": "[a-zA-Z0-9_-]+" }, + "SchedulingConfig": { + "type": "structure", + "members": { + "startTime": { + "shape": "StringDateTime", + "documentation": "The time a job will begin rollout of the job document to all devices in the target group for a job. The startTime
can be scheduled up to a year in advance and must be scheduled a minimum of thirty minutes from the current time.
The time a job will stop rollout of the job document to all devices in the target group for a job. The endTime
must take place no later than two years from the current time and be scheduled a minimum of thirty minutes from the current time. The minimum duration between startTime
and endTime
is thirty minutes. The maximum duration between startTime
and endTime
is two years.
Specifies the end behavior for all job executions after a job reaches the selected endTime
. If endTime
is not selected when creating the job, then endBehavior
does not apply.
Specifies the date and time that a job will begin the rollout of the job document to all devices in the target group. Additionally, you can specify the end behavior for each job execution when it reaches the scheduled end time.
" + }, "SearchIndexRequest": { "type": "structure", "required": [ @@ -20948,6 +21040,11 @@ "String": { "type": "string" }, + "StringDateTime": { + "type": "string", + "max": 64, + "min": 1 + }, "StringList": { "type": "list", "member": { @@ -21467,7 +21564,7 @@ }, "managedFields": { "shape": "Fields", - "documentation": "Contains fields that are indexed and whose types are already known by the Fleet Indexing service.
" + "documentation": "Contains fields that are indexed and whose types are already known by the Fleet Indexing service. This is an optional field. For more information, see Managed fields in the Amazon Web Services IoT Core Developer Guide.
" }, "customFields": { "shape": "Fields", @@ -23082,6 +23179,42 @@ "UseBase64": { "type": "boolean" }, + "UserProperties": { + "type": "list", + "member": { + "shape": "UserProperty" + }, + "max": 100, + "min": 1 + }, + "UserProperty": { + "type": "structure", + "required": [ + "key", + "value" + ], + "members": { + "key": { + "shape": "UserPropertyKey", + "documentation": "A key to be specified in UserProperty
.
A value to be specified in UserProperty
.
A key-value pair that you define in the header. Both the key and the value are either literal strings or valid substitution templates.
" + }, + "UserPropertyKey": { + "type": "string", + "max": 1024, + "min": 0 + }, + "UserPropertyValue": { + "type": "string", + "max": 1024, + "min": 0 + }, "Valid": { "type": "boolean" }, diff --git a/apis/iot-data-2015-05-28.min.json b/apis/iot-data-2015-05-28.min.json index d04b0a0cab..6ef3a51a2b 100644 --- a/apis/iot-data-2015-05-28.min.json +++ b/apis/iot-data-2015-05-28.min.json @@ -219,6 +219,32 @@ }, "payload": { "type": "blob" + }, + "userProperties": { + "jsonvalue": true, + "location": "header", + "locationName": "x-amz-mqtt5-user-properties" + }, + "payloadFormatIndicator": { + "location": "header", + "locationName": "x-amz-mqtt5-payload-format-indicator" + }, + "contentType": { + "location": "querystring", + "locationName": "contentType" + }, + "responseTopic": { + "location": "querystring", + "locationName": "responseTopic" + }, + "correlationData": { + "location": "header", + "locationName": "x-amz-mqtt5-correlation-data" + }, + "messageExpiry": { + "location": "querystring", + "locationName": "messageExpiry", + "type": "long" } }, "payload": "payload" diff --git a/apis/iot-data-2015-05-28.normal.json b/apis/iot-data-2015-05-28.normal.json index c861c0e84a..ee11bbf3b9 100644 --- a/apis/iot-data-2015-05-28.normal.json +++ b/apis/iot-data-2015-05-28.normal.json @@ -220,6 +220,9 @@ }, { "shape": "MethodNotAllowedException" + }, + { + "shape": "ThrottlingException" } ], "documentation": "Publishes an MQTT message.
Requires permission to access the Publish action.
For more information about MQTT messages, see MQTT Protocol in the IoT Developer Guide.
For more information about messaging costs, see Amazon Web Services IoT Core pricing - Messaging.
" @@ -269,6 +272,12 @@ } }, "shapes": { + "ContentType": { + "type": "string" + }, + "CorrelationData": { + "type": "string" + }, "DeleteThingShadowRequest": { "type": "structure", "required": [ @@ -454,6 +463,9 @@ "max": 200, "min": 1 }, + "MessageExpiry": { + "type": "long" + }, "NamedShadowList": { "type": "list", "member": { @@ -471,6 +483,13 @@ "Payload": { "type": "blob" }, + "PayloadFormatIndicator": { + "type": "string", + "enum": [ + "UNSPECIFIED_BYTES", + "UTF8_DATA" + ] + }, "PayloadSize": { "type": "long" }, @@ -488,7 +507,7 @@ }, "qos": { "shape": "Qos", - "documentation": "The Quality of Service (QoS) level.
", + "documentation": "The Quality of Service (QoS) level. The default QoS level is 0.
", "location": "querystring", "locationName": "qos" }, @@ -501,6 +520,43 @@ "payload": { "shape": "Payload", "documentation": "The message body. MQTT accepts text, binary, and empty (null) message payloads.
Publishing an empty (null) payload with retain = true
deletes the retained message identified by topic from Amazon Web Services IoT Core.
A JSON string that contains an array of JSON objects. If you don’t use Amazon Web Services SDK or CLI, you must encode the JSON string to base64 format before adding it to the HTTP header. userProperties
is an HTTP header value in the API.
The following example userProperties
parameter is a JSON string which represents two User Properties. Note that it needs to be base64-encoded:
[{\"deviceName\": \"alpha\"}, {\"deviceCnt\": \"45\"}]
An Enum
string value that indicates whether the payload is formatted as UTF-8. payloadFormatIndicator
is an HTTP header value in the API.
A UTF-8 encoded string that describes the content of the publishing message.
", + "location": "querystring", + "locationName": "contentType" + }, + "responseTopic": { + "shape": "ResponseTopic", + "documentation": "A UTF-8 encoded string that's used as the topic name for a response message. The response topic is used to describe the topic which the receiver should publish to as part of the request-response flow. The topic must not contain wildcard characters.
", + "location": "querystring", + "locationName": "responseTopic" + }, + "correlationData": { + "shape": "CorrelationData", + "documentation": "The base64-encoded binary data used by the sender of the request message to identify which request the response message is for when it's received. correlationData
is an HTTP header value in the API.
A user-defined integer value that represents the message expiry interval in seconds. If absent, the message doesn't expire. For more information about the limits of messageExpiry
, see Amazon Web Services IoT Core message broker and protocol limits and quotas from the Amazon Web Services Reference Guide.
The input for the Publish operation.
", @@ -511,6 +567,9 @@ "max": 1, "min": 0 }, + "ResponseTopic": { + "type": "string" + }, "Retain": { "type": "boolean" }, @@ -597,6 +656,9 @@ }, "documentation": "The output from the UpdateThingShadow operation.
", "payload": "payload" + }, + "UserProperties": { + "type": "string" } }, "documentation": "IoT data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the Amazon Web Services cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete shadows. A shadow is a persistent representation of your things and their state in the Amazon Web Services cloud.
Find the endpoint address for actions in IoT data by running this CLI command:
aws iot describe-endpoint --endpoint-type iot:Data-ATS
The service name used by Amazon Web Services Signature Version 4 to sign requests is: iotdevicegateway.
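For orientation, a minimal usage sketch of calling Publish with the new MQTT5 parameters from the v2 `AWS.IotData` client. The endpoint, topic, and property values are placeholders, and the parameter names are assumed from the model above:

```javascript
// Sketch only: publish an MQTT5 message with user properties (placeholder values).
const AWS = require('aws-sdk');

// Placeholder endpoint; look yours up with:
//   aws iot describe-endpoint --endpoint-type iot:Data-ATS
const iotdata = new AWS.IotData({ endpoint: 'example-ats.iot.us-east-1.amazonaws.com' });

iotdata.publish({
  topic: 'devices/alpha/telemetry',   // placeholder topic
  qos: 0,                             // the default QoS level is 0
  retain: false,
  payload: JSON.stringify({ temperature: 22.5 }),
  contentType: 'application/json',    // describes the message body
  payloadFormatIndicator: 'UTF8_DATA',
  messageExpiry: 300,                 // seconds until an undelivered message expires
  // JSON string of user properties; the SDK handles the header encoding,
  // so manual base64 encoding is only needed for raw HTTP callers.
  userProperties: JSON.stringify([{ deviceName: 'alpha' }, { deviceCnt: '45' }])
}, (err, data) => {
  if (err) console.error('publish failed', err);
  else console.log('published', data);
});
```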
" diff --git a/apis/iotwireless-2020-11-22.min.json b/apis/iotwireless-2020-11-22.min.json index ff9823feeb..f851ce9d7d 100644 --- a/apis/iotwireless-2020-11-22.min.json +++ b/apis/iotwireless-2020-11-22.min.json @@ -447,7 +447,8 @@ }, "Tags": { "shape": "S6" - } + }, + "Positioning": {} } }, "output": { @@ -472,7 +473,7 @@ "Name": {}, "Description": {}, "LoRaWAN": { - "shape": "S3k" + "shape": "S3o" }, "Tags": { "shape": "S6" @@ -533,7 +534,7 @@ }, "Name": {}, "Update": { - "shape": "S45" + "shape": "S49" }, "ClientRequestToken": { "idempotencyToken": true @@ -1065,19 +1066,19 @@ "type": "structure", "members": { "DeviceRegistrationState": { - "shape": "S5p" + "shape": "S5t" }, "Proximity": { - "shape": "S5s" + "shape": "S5w" }, "Join": { - "shape": "S5t" + "shape": "S5x" }, "ConnectionStatus": { - "shape": "S5v" + "shape": "S5z" }, "MessageDeliveryStatus": { - "shape": "S5x" + "shape": "S61" } } } @@ -1112,7 +1113,7 @@ "members": { "RfRegion": {}, "StartTime": { - "shape": "S62" + "shape": "S66" } } }, @@ -1139,10 +1140,10 @@ "members": { "DefaultLogLevel": {}, "WirelessGatewayLogOptions": { - "shape": "S66" + "shape": "S6a" }, "WirelessDeviceLogOptions": { - "shape": "S6c" + "shape": "S6g" } } } @@ -1212,7 +1213,7 @@ "type": "structure", "members": { "LoRaWAN": { - "shape": "S6p" + "shape": "S6t" } } } @@ -1279,7 +1280,7 @@ "type": "structure", "members": { "Sidewalk": { - "shape": "S6y" + "shape": "S72" }, "AccountLinked": { "type": "boolean" @@ -1307,13 +1308,15 @@ "location": "querystring", "locationName": "resourceType" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "output": { "type": "structure", "members": { "Position": { - "shape": "S75" + "shape": "S79" }, "Accuracy": { "type": "structure", @@ -1330,8 +1333,12 @@ "SolverProvider": {}, "SolverVersion": {}, "Timestamp": {} - } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "GetPositionConfiguration": { "http": { @@ -1354,16 +1361,487 @@ "location": "querystring", "locationName": "resourceType" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "output": { "type": "structure", "members": { "Solvers": { - "shape": "S7g" + "shape": "S7k" }, "Destination": {} + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." 
+ }, + "GetPositionEstimate": { + "http": { + "requestUri": "/position-estimate" + }, + "input": { + "type": "structure", + "members": { + "WiFiAccessPoints": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "MacAddress", + "Rss" + ], + "members": { + "MacAddress": {}, + "Rss": { + "type": "integer" + } + } + } + }, + "CellTowers": { + "type": "structure", + "members": { + "Gsm": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "Mcc", + "Mnc", + "Lac", + "GeranCid" + ], + "members": { + "Mcc": { + "type": "integer" + }, + "Mnc": { + "type": "integer" + }, + "Lac": { + "type": "integer" + }, + "GeranCid": { + "type": "integer" + }, + "GsmLocalId": { + "type": "structure", + "required": [ + "Bsic", + "Bcch" + ], + "members": { + "Bsic": { + "type": "integer" + }, + "Bcch": { + "type": "integer" + } + } + }, + "GsmTimingAdvance": { + "type": "integer" + }, + "RxLevel": { + "type": "integer" + }, + "GsmNmr": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "Bsic", + "Bcch" + ], + "members": { + "Bsic": { + "type": "integer" + }, + "Bcch": { + "type": "integer" + }, + "RxLevel": { + "type": "integer" + }, + "GlobalIdentity": { + "type": "structure", + "required": [ + "Lac", + "GeranCid" + ], + "members": { + "Lac": { + "type": "integer" + }, + "GeranCid": { + "type": "integer" + } + } + } + } + } + } + } + } + }, + "Wcdma": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "Mcc", + "Mnc", + "UtranCid" + ], + "members": { + "Mcc": { + "type": "integer" + }, + "Mnc": { + "type": "integer" + }, + "Lac": { + "type": "integer" + }, + "UtranCid": { + "type": "integer" + }, + "WcdmaLocalId": { + "type": "structure", + "required": [ + "Uarfcndl", + "Psc" + ], + "members": { + "Uarfcndl": { + "type": "integer" + }, + "Psc": { + "type": "integer" + } + } + }, + "Rscp": { + "type": "integer" + }, + "PathLoss": { + "type": "integer" + }, + "WcdmaNmr": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "Uarfcndl", + "Psc", + "UtranCid" + ], + "members": { + "Uarfcndl": { + "type": "integer" + }, + "Psc": { + "type": "integer" + }, + "UtranCid": { + "type": "integer" + }, + "Rscp": { + "type": "integer" + }, + "PathLoss": { + "type": "integer" + } + } + } + } + } + } + }, + "Tdscdma": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "Mcc", + "Mnc", + "UtranCid" + ], + "members": { + "Mcc": { + "type": "integer" + }, + "Mnc": { + "type": "integer" + }, + "Lac": { + "type": "integer" + }, + "UtranCid": { + "type": "integer" + }, + "TdscdmaLocalId": { + "type": "structure", + "required": [ + "Uarfcn", + "CellParams" + ], + "members": { + "Uarfcn": { + "type": "integer" + }, + "CellParams": { + "type": "integer" + } + } + }, + "TdscdmaTimingAdvance": { + "type": "integer" + }, + "Rscp": { + "type": "integer" + }, + "PathLoss": { + "type": "integer" + }, + "TdscdmaNmr": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "Uarfcn", + "CellParams" + ], + "members": { + "Uarfcn": { + "type": "integer" + }, + "CellParams": { + "type": "integer" + }, + "UtranCid": { + "type": "integer" + }, + "Rscp": { + "type": "integer" + }, + "PathLoss": { + "type": "integer" + } + } + } + } + } + } + }, + "Lte": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "Mcc", + "Mnc", + "EutranCid" + ], + "members": { + "Mcc": { + "type": "integer" + }, + "Mnc": { + "type": "integer" + }, + "EutranCid": { + "type": "integer" + }, + "Tac": { 
+ "type": "integer" + }, + "LteLocalId": { + "type": "structure", + "required": [ + "Pci", + "Earfcn" + ], + "members": { + "Pci": { + "type": "integer" + }, + "Earfcn": { + "type": "integer" + } + } + }, + "LteTimingAdvance": { + "type": "integer" + }, + "Rsrp": { + "type": "integer" + }, + "Rsrq": { + "type": "float" + }, + "NrCapable": { + "type": "boolean" + }, + "LteNmr": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "Pci", + "Earfcn", + "EutranCid" + ], + "members": { + "Pci": { + "type": "integer" + }, + "Earfcn": { + "type": "integer" + }, + "EutranCid": { + "type": "integer" + }, + "Rsrp": { + "type": "integer" + }, + "Rsrq": { + "type": "float" + } + } + } + } + } + } + }, + "Cdma": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "SystemId", + "NetworkId", + "BaseStationId" + ], + "members": { + "SystemId": { + "type": "integer" + }, + "NetworkId": { + "type": "integer" + }, + "BaseStationId": { + "type": "integer" + }, + "RegistrationZone": { + "type": "integer" + }, + "CdmaLocalId": { + "type": "structure", + "required": [ + "PnOffset", + "CdmaChannel" + ], + "members": { + "PnOffset": { + "type": "integer" + }, + "CdmaChannel": { + "type": "integer" + } + } + }, + "PilotPower": { + "type": "integer" + }, + "BaseLat": { + "type": "float" + }, + "BaseLng": { + "type": "float" + }, + "CdmaNmr": { + "type": "list", + "member": { + "type": "structure", + "required": [ + "PnOffset", + "CdmaChannel" + ], + "members": { + "PnOffset": { + "type": "integer" + }, + "CdmaChannel": { + "type": "integer" + }, + "PilotPower": { + "type": "integer" + }, + "BaseStationId": { + "type": "integer" + } + } + } + } + } + } + } + } + }, + "Ip": { + "type": "structure", + "required": [ + "IpAddress" + ], + "members": { + "IpAddress": {} + } + }, + "Gnss": { + "type": "structure", + "required": [ + "Payload" + ], + "members": { + "Payload": {}, + "CaptureTime": { + "type": "float" + }, + "CaptureTimeAccuracy": { + "type": "float" + }, + "AssistPosition": { + "type": "list", + "member": { + "type": "float" + } + }, + "AssistAltitude": { + "type": "float" + }, + "Use2DSolver": { + "type": "boolean" + } + } + }, + "Timestamp": { + "type": "timestamp" + } } + }, + "output": { + "type": "structure", + "members": { + "GeoJsonPayload": { + "type": "blob" + } + }, + "payload": "GeoJsonPayload" } }, "GetResourceEventConfiguration": { @@ -1396,19 +1874,19 @@ "type": "structure", "members": { "DeviceRegistrationState": { - "shape": "S7p" + "shape": "S9y" }, "Proximity": { - "shape": "S7r" + "shape": "Sa0" }, "Join": { - "shape": "S7s" + "shape": "Sa1" }, "ConnectionStatus": { - "shape": "S7u" + "shape": "Sa3" }, "MessageDeliveryStatus": { - "shape": "S7w" + "shape": "Sa5" } } } @@ -1443,6 +1921,38 @@ } } }, + "GetResourcePosition": { + "http": { + "method": "GET", + "requestUri": "/resource-positions/{ResourceIdentifier}" + }, + "input": { + "type": "structure", + "required": [ + "ResourceIdentifier", + "ResourceType" + ], + "members": { + "ResourceIdentifier": { + "location": "uri", + "locationName": "ResourceIdentifier" + }, + "ResourceType": { + "location": "querystring", + "locationName": "resourceType" + } + } + }, + "output": { + "type": "structure", + "members": { + "GeoJsonPayload": { + "type": "blob" + } + }, + "payload": "GeoJsonPayload" + } + }, "GetServiceEndpoint": { "http": { "method": "GET", @@ -1591,10 +2101,11 @@ "SidewalkId": {}, "SidewalkManufacturingSn": {}, "DeviceCertificates": { - "shape": "S8y" + "shape": "Sb9" } } - } + }, + 
"Positioning": {} } } }, @@ -1695,7 +2206,7 @@ "Id": {}, "Description": {}, "LoRaWAN": { - "shape": "S3k" + "shape": "S3o" }, "Arn": {}, "ThingName": {}, @@ -1752,7 +2263,7 @@ "type": "structure", "members": { "CurrentVersion": { - "shape": "S4a" + "shape": "S4e" } } } @@ -1839,7 +2350,7 @@ }, "Name": {}, "Update": { - "shape": "S45" + "shape": "S49" }, "Arn": {} } @@ -1964,19 +2475,19 @@ "type": "structure", "members": { "DeviceRegistrationState": { - "shape": "S7p" + "shape": "S9y" }, "Proximity": { - "shape": "S7r" + "shape": "Sa0" }, "Join": { - "shape": "S7s" + "shape": "Sa1" }, "ConnectionStatus": { - "shape": "S7u" + "shape": "Sa3" }, "MessageDeliveryStatus": { - "shape": "S7w" + "shape": "Sa5" } } } @@ -2164,7 +2675,7 @@ "Sidewalk": { "type": "list", "member": { - "shape": "S6y" + "shape": "S72" } } } @@ -2192,7 +2703,9 @@ "location": "querystring", "locationName": "nextToken" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "output": { "type": "structure", @@ -2205,15 +2718,19 @@ "ResourceIdentifier": {}, "ResourceType": {}, "Solvers": { - "shape": "S7g" + "shape": "S7k" }, "Destination": {} } } }, "NextToken": {} - } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "ListQueuedMessages": { "http": { @@ -2260,7 +2777,7 @@ }, "ReceivedAt": {}, "LoRaWAN": { - "shape": "Sb0" + "shape": "Sdb" } } } @@ -2402,7 +2919,7 @@ "SidewalkId": {}, "SidewalkManufacturingSn": {}, "DeviceCertificates": { - "shape": "S8y" + "shape": "Sb9" } } }, @@ -2454,10 +2971,10 @@ "type": "structure", "members": { "CurrentVersion": { - "shape": "S4a" + "shape": "S4e" }, "UpdateVersion": { - "shape": "S4a" + "shape": "S4e" } } }, @@ -2501,7 +3018,7 @@ "Name": {}, "Description": {}, "LoRaWAN": { - "shape": "S3k" + "shape": "S3o" }, "LastUplinkReceivedAt": {} } @@ -2548,12 +3065,18 @@ } }, "Destination": {} - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "output": { "type": "structure", - "members": {} - } + "members": {}, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "PutResourceLogLevel": { "http": { @@ -2693,7 +3216,7 @@ "type": "structure", "members": { "LoRaWAN": { - "shape": "Sb0" + "shape": "Sdb" }, "Sidewalk": { "type": "structure", @@ -2791,7 +3314,7 @@ "type": "structure", "members": { "StartTime": { - "shape": "S62" + "shape": "S66" } } } @@ -2820,7 +3343,7 @@ "locationName": "Id" }, "LoRaWAN": { - "shape": "S6p" + "shape": "S6t" } } }, @@ -2946,19 +3469,19 @@ "type": "structure", "members": { "DeviceRegistrationState": { - "shape": "S5p" + "shape": "S5t" }, "Proximity": { - "shape": "S5s" + "shape": "S5w" }, "Join": { - "shape": "S5t" + "shape": "S5x" }, "ConnectionStatus": { - "shape": "S5v" + "shape": "S5z" }, "MessageDeliveryStatus": { - "shape": "S5x" + "shape": "S61" } } }, @@ -3007,10 +3530,10 @@ "members": { "DefaultLogLevel": {}, "WirelessDeviceLogOptions": { - "shape": "S6c" + "shape": "S6g" }, "WirelessGatewayLogOptions": { - "shape": "S66" + "shape": "S6a" } } }, @@ -3146,14 +3669,20 @@ "locationName": "resourceType" }, "Position": { - "shape": "S75" + "shape": "S79" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." 
}, "output": { "type": "structure", - "members": {} - } + "members": {}, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "UpdateResourceEventConfiguration": { "http": { @@ -3181,19 +3710,19 @@ "locationName": "partnerType" }, "DeviceRegistrationState": { - "shape": "S7p" + "shape": "S9y" }, "Proximity": { - "shape": "S7r" + "shape": "Sa0" }, "Join": { - "shape": "S7s" + "shape": "Sa1" }, "ConnectionStatus": { - "shape": "S7u" + "shape": "Sa3" }, "MessageDeliveryStatus": { - "shape": "S7w" + "shape": "Sa5" } } }, @@ -3202,6 +3731,38 @@ "members": {} } }, + "UpdateResourcePosition": { + "http": { + "method": "PATCH", + "requestUri": "/resource-positions/{ResourceIdentifier}", + "responseCode": 204 + }, + "input": { + "type": "structure", + "required": [ + "ResourceIdentifier", + "ResourceType" + ], + "members": { + "ResourceIdentifier": { + "location": "uri", + "locationName": "ResourceIdentifier" + }, + "ResourceType": { + "location": "querystring", + "locationName": "resourceType" + }, + "GeoJsonPayload": { + "type": "blob" + } + }, + "payload": "GeoJsonPayload" + }, + "output": { + "type": "structure", + "members": {} + } + }, "UpdateWirelessDevice": { "http": { "method": "PATCH", @@ -3247,11 +3808,15 @@ "members": { "Positioning": { "shape": "S3f" + }, + "Applications": { + "shape": "S3g" } } } } - } + }, + "Positioning": {} } }, "output": { @@ -3278,10 +3843,10 @@ "Name": {}, "Description": {}, "JoinEuiFilters": { - "shape": "S3m" + "shape": "S3q" }, "NetIdFilters": { - "shape": "S3o" + "shape": "S3s" } } }, @@ -3476,6 +4041,9 @@ }, "Positioning": { "shape": "S3f" + }, + "Applications": { + "shape": "S3g" } } } @@ -3495,16 +4063,29 @@ } } }, - "S3k": { + "S3g": { + "type": "list", + "member": { + "type": "structure", + "members": { + "FPort": { + "type": "integer" + }, + "Type": {}, + "DestinationName": {} + } + } + }, + "S3o": { "type": "structure", "members": { "GatewayEui": {}, "RfRegion": {}, "JoinEuiFilters": { - "shape": "S3m" + "shape": "S3q" }, "NetIdFilters": { - "shape": "S3o" + "shape": "S3s" }, "SubBands": { "type": "list", @@ -3528,18 +4109,18 @@ } } }, - "S3m": { + "S3q": { "type": "list", "member": { "type": "list", "member": {} } }, - "S3o": { + "S3s": { "type": "list", "member": {} }, - "S45": { + "S49": { "type": "structure", "members": { "UpdateDataSource": {}, @@ -3552,16 +4133,16 @@ "type": "long" }, "CurrentVersion": { - "shape": "S4a" + "shape": "S4e" }, "UpdateVersion": { - "shape": "S4a" + "shape": "S4e" } } } } }, - "S4a": { + "S4e": { "type": "structure", "members": { "PackageVersion": {}, @@ -3569,29 +4150,29 @@ "Station": {} } }, - "S5p": { + "S5t": { "type": "structure", "members": { "Sidewalk": { - "shape": "S5q" + "shape": "S5u" } } }, - "S5q": { + "S5u": { "type": "structure", "members": { "WirelessDeviceEventTopic": {} } }, - "S5s": { + "S5w": { "type": "structure", "members": { "Sidewalk": { - "shape": "S5q" + "shape": "S5u" } } }, - "S5t": { + "S5x": { "type": "structure", "members": { "LoRaWAN": { @@ -3602,7 +4183,7 @@ } } }, - "S5v": { + "S5z": { "type": "structure", "members": { "LoRaWAN": { @@ -3613,19 +4194,19 @@ } } }, - "S5x": { + "S61": { "type": "structure", "members": { "Sidewalk": { - "shape": "S5q" + "shape": "S5u" } } }, - "S62": { + "S66": { "type": "timestamp", "timestampFormat": "iso8601" }, - "S66": { + "S6a": { "type": "list", "member": { "type": "structure", @@ -3653,7 +4234,7 @@ } } }, - "S6c": { + 
"S6g": { "type": "list", "member": { "type": "structure", @@ -3681,7 +4262,7 @@ } } }, - "S6p": { + "S6t": { "type": "structure", "members": { "DlDr": { @@ -3699,7 +4280,7 @@ } } }, - "S6y": { + "S72": { "type": "structure", "members": { "AmazonId": {}, @@ -3710,13 +4291,13 @@ "Arn": {} } }, - "S75": { + "S79": { "type": "list", "member": { "type": "float" } }, - "S7g": { + "S7k": { "type": "structure", "members": { "SemtechGnss": { @@ -3730,31 +4311,31 @@ } } }, - "S7p": { + "S9y": { "type": "structure", "members": { "Sidewalk": { - "shape": "S7q" + "shape": "S9z" }, "WirelessDeviceIdEventTopic": {} } }, - "S7q": { + "S9z": { "type": "structure", "members": { "AmazonIdEventTopic": {} } }, - "S7r": { + "Sa0": { "type": "structure", "members": { "Sidewalk": { - "shape": "S7q" + "shape": "S9z" }, "WirelessDeviceIdEventTopic": {} } }, - "S7s": { + "Sa1": { "type": "structure", "members": { "LoRaWAN": { @@ -3766,7 +4347,7 @@ "WirelessDeviceIdEventTopic": {} } }, - "S7u": { + "Sa3": { "type": "structure", "members": { "LoRaWAN": { @@ -3778,16 +4359,16 @@ "WirelessGatewayIdEventTopic": {} } }, - "S7w": { + "Sa5": { "type": "structure", "members": { "Sidewalk": { - "shape": "S7q" + "shape": "S9z" }, "WirelessDeviceIdEventTopic": {} } }, - "S8y": { + "Sb9": { "type": "list", "member": { "type": "structure", @@ -3801,7 +4382,7 @@ } } }, - "Sb0": { + "Sdb": { "type": "structure", "members": { "FPort": { diff --git a/apis/iotwireless-2020-11-22.normal.json b/apis/iotwireless-2020-11-22.normal.json index 7152252d8a..6c3e2824ef 100644 --- a/apis/iotwireless-2020-11-22.normal.json +++ b/apis/iotwireless-2020-11-22.normal.json @@ -1528,7 +1528,9 @@ "shape": "InternalServerException" } ], - "documentation": "Get the position information for a given resource.
" + "documentation": "Get the position information for a given resource.
This action is no longer supported. Calls to retrieve the position information should use the GetResourcePosition API operation instead.
Get position configuration for a given resource.
" + "documentation": "Get position configuration for a given resource.
This action is no longer supported. Calls to retrieve the position configuration should use the GetResourcePosition API operation instead.
Get estimated position information as a payload in GeoJSON format. The payload measurement data is resolved using solvers that are provided by third-party vendors.
" }, "GetResourceEventConfiguration": { "name": "GetResourceEventConfiguration", @@ -1625,6 +1660,37 @@ ], "documentation": "Fetches the log-level override, if any, for a given resource-ID and resource-type. It can be used for a wireless device or a wireless gateway.
" }, + "GetResourcePosition": { + "name": "GetResourcePosition", + "http": { + "method": "GET", + "requestUri": "/resource-positions/{ResourceIdentifier}" + }, + "input": { + "shape": "GetResourcePositionRequest" + }, + "output": { + "shape": "GetResourcePositionResponse" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "InternalServerException" + } + ], + "documentation": "Get the position information for a given wireless device or a wireless gateway resource. The postion information uses the World Geodetic System (WGS84).
" + }, "GetServiceEndpoint": { "name": "GetServiceEndpoint", "http": { @@ -2188,7 +2254,9 @@ "shape": "InternalServerException" } ], - "documentation": "List position configurations for a given resource, such as positioning solvers.
" + "documentation": "List position configurations for a given resource, such as positioning solvers.
This action is no longer supported. Calls to retrieve position information should use the GetResourcePosition API operation instead.
Put position configuration for a given resource.
" + "documentation": "Put position configuration for a given resource.
This action is no longer supported. Calls to update the position configuration should use the UpdateResourcePosition API operation instead.
Update the position information of a resource.
" + "documentation": "Update the position information of a resource.
This action is no longer supported. Calls to update the position information should use the UpdateResourcePosition API operation instead.
Update the event configuration for a particular resource identifier.
" }, + "UpdateResourcePosition": { + "name": "UpdateResourcePosition", + "http": { + "method": "PATCH", + "requestUri": "/resource-positions/{ResourceIdentifier}", + "responseCode": 204 + }, + "input": { + "shape": "UpdateResourcePositionRequest" + }, + "output": { + "shape": "UpdateResourcePositionResponse" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "InternalServerException" + } + ], + "documentation": "Update the position information of a given wireless device or a wireless gateway resource. The postion coordinates are based on the World Geodetic System (WGS84).
" + }, "UpdateWirelessDevice": { "name": "UpdateWirelessDevice", "http": { @@ -3190,11 +3294,11 @@ "members": { "HorizontalAccuracy": { "shape": "HorizontalAccuracy", - "documentation": "The horizontal accuracy of the estimated position in meters.
" + "documentation": "The horizontal accuracy of the estimated position, which is the difference between the estimated location and the actual device location.
" }, "VerticalAccuracy": { "shape": "VerticalAccuracy", - "documentation": "The vertical accuracy of the estimated position in meters.
" + "documentation": "The vertical accuracy of the estimated position, which is the difference between the estimated altitude and actual device latitude in meters.
" } }, "documentation": "The accuracy of the estimated position in meters. An empty value indicates that no position data is available. A value of ‘0.0’ value indicates that position data is available. This data corresponds to the position information that you specified instead of the position computed by solver.
" @@ -3236,6 +3340,43 @@ "pattern": "[a-fA-F0-9]{64}", "sensitive": true }, + "ApplicationConfig": { + "type": "structure", + "members": { + "FPort": { + "shape": "FPort" + }, + "Type": { + "shape": "ApplicationConfigType", + "documentation": "Application type, which can be specified to obtain real-time position information of your LoRaWAN device.
" + }, + "DestinationName": { + "shape": "DestinationName", + "documentation": "The name of the position data destination that describes the AWS IoT rule that processes the device's position data for use by AWS IoT Core for LoRaWAN.
" + } + }, + "documentation": "LoRaWAN application configuration, which can be used to perform geolocation.
" + }, + "ApplicationConfigType": { + "type": "string", + "enum": [ + "SemtechGeolocation" + ] + }, + "Applications": { + "type": "list", + "member": { + "shape": "ApplicationConfig" + } + }, + "AssistPosition": { + "type": "list", + "member": { + "shape": "Coordinate" + }, + "max": 2, + "min": 2 + }, "AssociateAwsAccountWithPartnerAccountRequest": { "type": "structure", "required": [ @@ -3410,6 +3551,31 @@ "AutoCreateTasks": { "type": "boolean" }, + "BCCH": { + "type": "integer", + "max": 1023, + "min": 0 + }, + "BSIC": { + "type": "integer", + "max": 63, + "min": 0 + }, + "BaseLat": { + "type": "float", + "max": 90, + "min": -90 + }, + "BaseLng": { + "type": "float", + "max": 180, + "min": -180 + }, + "BaseStationId": { + "type": "integer", + "max": 65535, + "min": 0 + }, "BatteryLevel": { "type": "string", "documentation": "Sidewalk device battery level.
", @@ -3468,6 +3634,152 @@ "type": "structure", "members": {} }, + "CaptureTimeAccuracy": { + "type": "float" + }, + "CdmaChannel": { + "type": "integer", + "max": 4095, + "min": 0 + }, + "CdmaList": { + "type": "list", + "member": { + "shape": "CdmaObj" + }, + "max": 16, + "min": 1 + }, + "CdmaLocalId": { + "type": "structure", + "required": [ + "PnOffset", + "CdmaChannel" + ], + "members": { + "PnOffset": { + "shape": "PnOffset", + "documentation": "Pseudo-noise offset, which is a characteristic of the signal from a cell on a radio tower.
" + }, + "CdmaChannel": { + "shape": "CdmaChannel", + "documentation": "CDMA channel information.
" + } + }, + "documentation": "CDMA local ID information, which corresponds to the local identification parameters of a CDMA cell.
" + }, + "CdmaNmrList": { + "type": "list", + "member": { + "shape": "CdmaNmrObj" + }, + "max": 32, + "min": 1 + }, + "CdmaNmrObj": { + "type": "structure", + "required": [ + "PnOffset", + "CdmaChannel" + ], + "members": { + "PnOffset": { + "shape": "PnOffset", + "documentation": "Pseudo-noise offset, which is a characteristic of the signal from a cell on a radio tower.
" + }, + "CdmaChannel": { + "shape": "CdmaChannel", + "documentation": "CDMA channel information.
" + }, + "PilotPower": { + "shape": "PilotPower", + "documentation": "Transmit power level of the pilot signal, measured in dBm (decibel-milliwatts).
" + }, + "BaseStationId": { + "shape": "BaseStationId", + "documentation": "CDMA base station ID (BSID).
" + } + }, + "documentation": "CDMA object for network measurement reports.
" + }, + "CdmaObj": { + "type": "structure", + "required": [ + "SystemId", + "NetworkId", + "BaseStationId" + ], + "members": { + "SystemId": { + "shape": "SystemId", + "documentation": "CDMA system ID (SID).
" + }, + "NetworkId": { + "shape": "NetworkId", + "documentation": "CDMA network ID (NID).
" + }, + "BaseStationId": { + "shape": "BaseStationId", + "documentation": "CDMA base station ID (BSID).
" + }, + "RegistrationZone": { + "shape": "RegistrationZone", + "documentation": "CDMA registration zone (RZ).
" + }, + "CdmaLocalId": { + "shape": "CdmaLocalId", + "documentation": "CDMA local identification (local ID) parameters.
" + }, + "PilotPower": { + "shape": "PilotPower", + "documentation": "Transmit power level of the pilot signal, measured in dBm (decibel-milliwatts).
" + }, + "BaseLat": { + "shape": "BaseLat", + "documentation": "CDMA base station latitude in degrees.
" + }, + "BaseLng": { + "shape": "BaseLng", + "documentation": "CDMA base station longtitude in degrees.
" + }, + "CdmaNmr": { + "shape": "CdmaNmrList", + "documentation": "CDMA network measurement reports.
" + } + }, + "documentation": "CDMA (Code-division multiple access) object.
" + }, + "CellParams": { + "type": "integer", + "max": 127, + "min": 0 + }, + "CellTowers": { + "type": "structure", + "members": { + "Gsm": { + "shape": "GsmList", + "documentation": "GSM object information.
" + }, + "Wcdma": { + "shape": "WcdmaList", + "documentation": "WCDMA object information.
" + }, + "Tdscdma": { + "shape": "TdscdmaList", + "documentation": "TD-SCDMA object information.
" + }, + "Lte": { + "shape": "LteList", + "documentation": "LTE object information.
" + }, + "Cdma": { + "shape": "CdmaList", + "documentation": "CDMA object information.
" + } + }, + "documentation": "The cell towers that were used to perform the measurements.
" + }, "CertificateList": { "type": "structure", "required": [ @@ -3550,6 +3862,9 @@ }, "documentation": "Connection status resource type event configuration object for enabling or disabling topic.
" }, + "Coordinate": { + "type": "float" + }, "Crc": { "type": "long", "max": 4294967295, @@ -3836,6 +4151,10 @@ "Tags": { "shape": "TagList", "documentation": "The tags to attach to the new wireless device. Tags are metadata that you can use to manage a resource.
" + }, + "Positioning": { + "shape": "PositioningConfigStatus", + "documentation": "FPort values for the GNSS, stream, and ClockSync functions of the positioning information.
" } } }, @@ -3972,6 +4291,9 @@ "type": "timestamp", "documentation": "Created at timestamp for the resource.
" }, + "CreationDate": { + "type": "timestamp" + }, "DeleteDestinationRequest": { "type": "structure", "required": [ @@ -4564,11 +4886,21 @@ "max": 15, "min": 0 }, + "EARFCN": { + "type": "integer", + "max": 262143, + "min": 0 + }, "EndPoint": { "type": "string", "max": 256, "min": 1 }, + "EutranCid": { + "type": "integer", + "max": 268435455, + "min": 0 + }, "Event": { "type": "string", "documentation": "Sidewalk device status notification.
", @@ -4696,6 +5028,10 @@ "Positioning": { "shape": "Positioning", "documentation": "FPort values for the GNSS, stream, and ClockSync functions of the positioning information.
" + }, + "Applications": { + "shape": "Applications", + "documentation": "Optional LoRaWAN application information, which can be used for geolocation.
" } }, "documentation": "List of FPort assigned for different LoRaWAN application packages to use
" @@ -4792,6 +5128,9 @@ "Delete_Waiting" ] }, + "GPST": { + "type": "float" + }, "GatewayEui": { "type": "string", "pattern": "^(([0-9A-Fa-f]{2}-){7}|([0-9A-Fa-f]{2}:){7}|([0-9A-Fa-f]{2}\\s){7}|([0-9A-Fa-f]{2}){7})([0-9A-Fa-f]{2})$" @@ -4824,6 +5163,14 @@ "type": "string", "pattern": "[a-fA-F0-9]{32}" }, + "GeoJsonPayload": { + "type": "blob" + }, + "GeranCid": { + "type": "integer", + "max": 65535, + "min": 0 + }, "GetDestinationRequest": { "type": "structure", "required": [ @@ -5146,7 +5493,9 @@ "location": "querystring", "locationName": "resourceType" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "GetPositionConfigurationResponse": { "type": "structure", @@ -5159,8 +5508,45 @@ "shape": "DestinationName", "documentation": "The position data destination that describes the AWS IoT rule that processes the device's position data for use by AWS IoT Core for LoRaWAN.
" } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." + }, + "GetPositionEstimateRequest": { + "type": "structure", + "members": { + "WiFiAccessPoints": { + "shape": "WiFiAccessPoints", + "documentation": "Retrieves an estimated device position by resolving WLAN measurement data. The position is resolved using HERE's Wi-Fi based solver.
" + }, + "CellTowers": { + "shape": "CellTowers", + "documentation": "Retrieves an estimated device position by resolving measurement data from cellular radio towers. The position is resolved using HERE's cellular-based solver.
" + }, + "Ip": { + "shape": "Ip", + "documentation": "Retrieves an estimated device position by resolving the IP address information from the device. The position is resolved using MaxMind's IP-based solver.
" + }, + "Gnss": { + "shape": "Gnss", + "documentation": "Retrieves an estimated device position by resolving the global navigation satellite system (GNSS) scan data. The position is resolved using the GNSS solver powered by LoRa Cloud.
" + }, + "Timestamp": { + "shape": "CreationDate", + "documentation": "Optional information that specifies the time when the position information will be resolved. It uses the UNIX timestamp format. If not specified, the time at which the request was received will be used.
" + } } }, + "GetPositionEstimateResponse": { + "type": "structure", + "members": { + "GeoJsonPayload": { + "shape": "GeoJsonPayload", + "documentation": "The position information of the resource, displayed as a JSON payload. The payload uses the GeoJSON format, which a format that's used to encode geographic data structures. For more information, see GeoJSON.
" + } + }, + "payload": "GeoJsonPayload" + }, "GetPositionRequest": { "type": "structure", "required": [ @@ -5180,7 +5566,9 @@ "location": "querystring", "locationName": "resourceType" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "GetPositionResponse": { "type": "structure", @@ -5209,7 +5597,9 @@ "shape": "ISODateTimeString", "documentation": "The timestamp at which the device's position was determined.
" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "GetResourceEventConfigurationRequest": { "type": "structure", @@ -5291,6 +5681,37 @@ } } }, + "GetResourcePositionRequest": { + "type": "structure", + "required": [ + "ResourceIdentifier", + "ResourceType" + ], + "members": { + "ResourceIdentifier": { + "shape": "PositionResourceIdentifier", + "documentation": "The identifier of the resource for which position information is retrieved. It can be the wireless device ID or the wireless gateway ID depending on the resource type.
", + "location": "uri", + "locationName": "ResourceIdentifier" + }, + "ResourceType": { + "shape": "PositionResourceType", + "documentation": "The type of resource for which position information is retrieved, which can be a wireless device or a wireless gateway.
", + "location": "querystring", + "locationName": "resourceType" + } + } + }, + "GetResourcePositionResponse": { + "type": "structure", + "members": { + "GeoJsonPayload": { + "shape": "GeoJsonPayload", + "documentation": "The position information of the resource, displayed as a JSON payload. The payload uses the GeoJSON format, which a format that's used to encode geographic data structures. For more information, see GeoJSON.
" + } + }, + "payload": "GeoJsonPayload" + }, "GetServiceEndpointRequest": { "type": "structure", "members": { @@ -5417,6 +5838,10 @@ "Sidewalk": { "shape": "SidewalkDevice", "documentation": "Sidewalk device object.
" + }, + "Positioning": { + "shape": "PositioningConfigStatus", + "documentation": "FPort values for the GNSS, stream, and ClockSync functions of the positioning information.
" } } }, @@ -5664,13 +6089,180 @@ } } }, - "HorizontalAccuracy": { - "type": "float", - "min": 0 + "GlobalIdentity": { + "type": "structure", + "required": [ + "Lac", + "GeranCid" + ], + "members": { + "Lac": { + "shape": "LAC", + "documentation": "Location area code of the global identity.
" + }, + "GeranCid": { + "shape": "GeranCid", + "documentation": "GERAN (GSM EDGE Radio Access Network) cell global identifier.
" + } + }, + "documentation": "Global identity information.
" }, - "HrAllowed": { + "Gnss": { + "type": "structure", + "required": [ + "Payload" + ], + "members": { + "Payload": { + "shape": "GnssNav", + "documentation": "Payload that contains the GNSS scan result, or NAV message, in hexadecimal notation.
" + }, + "CaptureTime": { + "shape": "GPST", + "documentation": "Optional parameter that gives an estimate of the time when the GNSS scan information is taken, in seconds GPS time (GPST). If capture time is not specified, the local server time is used.
" + }, + "CaptureTimeAccuracy": { + "shape": "CaptureTimeAccuracy", + "documentation": "Optional value that gives the capture time estimate accuracy, in seconds. If capture time accuracy is not specified, default value of 300 is used.
" + }, + "AssistPosition": { + "shape": "AssistPosition", + "documentation": "Optional assistance position information, specified using latitude and longitude values in degrees. The co-ordinates are inside the WGS84 reference frame.
" + }, + "AssistAltitude": { + "shape": "Coordinate", + "documentation": "Optional assistance altitude, which is the altitude of the device at capture time, specified in meters above the WGS84 reference ellipsoid.
" + }, + "Use2DSolver": { + "shape": "Use2DSolver", + "documentation": "Optional parameter that forces 2D solve, which modifies the positioning algorithm to a 2D solution problem. When this parameter is specified, the assistance altitude should have an accuracy of at least 10 meters.
" + } + }, + "documentation": "Global navigation satellite system (GNSS) object used for positioning.
" + }, + "GnssNav": { + "type": "string", + "max": 2048 + }, + "GsmList": { + "type": "list", + "member": { + "shape": "GsmObj" + }, + "max": 16, + "min": 1 + }, + "GsmLocalId": { + "type": "structure", + "required": [ + "Bsic", + "Bcch" + ], + "members": { + "Bsic": { + "shape": "BSIC", + "documentation": "GSM base station identity code (BSIC).
" + }, + "Bcch": { + "shape": "BCCH", + "documentation": "GSM broadcast control channel.
" + } + }, + "documentation": "GSM local ID information, which corresponds to the local identification parameters of a GSM cell.
" + }, + "GsmNmrList": { + "type": "list", + "member": { + "shape": "GsmNmrObj" + }, + "max": 32, + "min": 1 + }, + "GsmNmrObj": { + "type": "structure", + "required": [ + "Bsic", + "Bcch" + ], + "members": { + "Bsic": { + "shape": "BSIC", + "documentation": "GSM base station identity code (BSIC).
" + }, + "Bcch": { + "shape": "BCCH", + "documentation": "GSM broadcast control channel.
" + }, + "RxLevel": { + "shape": "RxLevel", + "documentation": "Rx level, which is the received signal power, measured in dBm (decibel-milliwatts).
" + }, + "GlobalIdentity": { + "shape": "GlobalIdentity", + "documentation": "Global identity information of the GSM object.
" + } + }, + "documentation": "GSM object for network measurement reports.
" + }, + "GsmObj": { + "type": "structure", + "required": [ + "Mcc", + "Mnc", + "Lac", + "GeranCid" + ], + "members": { + "Mcc": { + "shape": "MCC", + "documentation": "Mobile Country Code.
" + }, + "Mnc": { + "shape": "MNC", + "documentation": "Mobile Network Code.
" + }, + "Lac": { + "shape": "LAC", + "documentation": "Location area code.
" + }, + "GeranCid": { + "shape": "GeranCid", + "documentation": "GERAN (GSM EDGE Radio Access Network) Cell Global Identifier.
" + }, + "GsmLocalId": { + "shape": "GsmLocalId", + "documentation": "GSM local identification (local ID) information.
" + }, + "GsmTimingAdvance": { + "shape": "GsmTimingAdvance", + "documentation": "Timing advance value, which corresponds to the length of time a signal takes to reach the base station from a mobile phone.
" + }, + "RxLevel": { + "shape": "RxLevel", + "documentation": "Rx level, which is the received signal power, measured in dBm (decibel-milliwatts).
" + }, + "GsmNmr": { + "shape": "GsmNmrList", + "documentation": "GSM object for network measurement reports.
" + } + }, + "documentation": "GSM object.
" + }, + "GsmTimingAdvance": { + "type": "integer", + "max": 63, + "min": 0 + }, + "HorizontalAccuracy": { + "type": "float", + "min": 0 + }, + "HrAllowed": { "type": "boolean" }, + "IPAddress": { + "type": "string" + }, "ISODateTimeString": { "type": "string", "pattern": "^([\\+-]?\\d{4}(?!\\d{2}\\b))((-?)((0[1-9]|1[0-2])(\\3([12]\\d|0[1-9]|3[01]))?|W([0-4]\\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\\d|[12]\\d{2}|3([0-5]\\d|6[1-6])))([T\\s]((([01]\\d|2[0-3])((:?)[0-5]\\d)?|24\\:?00)([\\.,]\\d+(?!:))?)?(\\17[0-5]\\d([\\.,]\\d+)?)?([zZ]|([\\+-])([01]\\d|2[0-3]):?([0-5]\\d)?)?)?)?$" @@ -5697,6 +6289,19 @@ "max": 4096, "min": 1 }, + "Ip": { + "type": "structure", + "required": [ + "IpAddress" + ], + "members": { + "IpAddress": { + "shape": "IPAddress", + "documentation": "IP address information.
" + } + }, + "documentation": "IP address used for resolving device location.
" + }, "JoinEui": { "type": "string", "pattern": "[a-fA-F0-9]{16}" @@ -5743,6 +6348,11 @@ }, "documentation": "Join resource type event configuration object for enabling or disabling topic.
" }, + "LAC": { + "type": "integer", + "max": 65535, + "min": 1 + }, "ListDestinationsRequest": { "type": "structure", "members": { @@ -6012,7 +6622,9 @@ "location": "querystring", "locationName": "nextToken" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "ListPositionConfigurationsResponse": { "type": "structure", @@ -6025,7 +6637,9 @@ "shape": "NextToken", "documentation": "The token to use to get the next set of results, or null if there are no additional results.
" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "ListQueuedMessagesRequest": { "type": "structure", @@ -6804,6 +7418,143 @@ "DISABLED" ] }, + "LteList": { + "type": "list", + "member": { + "shape": "LteObj" + }, + "max": 16, + "min": 1 + }, + "LteLocalId": { + "type": "structure", + "required": [ + "Pci", + "Earfcn" + ], + "members": { + "Pci": { + "shape": "PCI", + "documentation": "Physical cell ID.
" + }, + "Earfcn": { + "shape": "EARFCN", + "documentation": "Evolved universal terrestrial radio access (E-UTRA) absolute radio frequency channel number (FCN).
" + } + }, + "documentation": "LTE local identification (local ID) information.
" + }, + "LteNmrList": { + "type": "list", + "member": { + "shape": "LteNmrObj" + }, + "max": 32, + "min": 1 + }, + "LteNmrObj": { + "type": "structure", + "required": [ + "Pci", + "Earfcn", + "EutranCid" + ], + "members": { + "Pci": { + "shape": "PCI", + "documentation": "Physical cell ID.
" + }, + "Earfcn": { + "shape": "EARFCN", + "documentation": "E-UTRA (Evolved universal terrestrial Radio Access) absolute radio frequency channel Number (EARFCN).
" + }, + "EutranCid": { + "shape": "EutranCid", + "documentation": "E-UTRAN (Evolved Universal Terrestrial Radio Access Network) cell global identifier (EUTRANCID).
" + }, + "Rsrp": { + "shape": "RSRP", + "documentation": "Signal power of the reference signal received, measured in dBm (decibel-milliwatts).
" + }, + "Rsrq": { + "shape": "RSRQ", + "documentation": "Signal quality of the reference Signal received, measured in decibels (dB).
" + } + }, + "documentation": "LTE object for network measurement reports.
" + }, + "LteObj": { + "type": "structure", + "required": [ + "Mcc", + "Mnc", + "EutranCid" + ], + "members": { + "Mcc": { + "shape": "MCC", + "documentation": "Mobile Country Code.
" + }, + "Mnc": { + "shape": "MNC", + "documentation": "Mobile Network Code.
" + }, + "EutranCid": { + "shape": "EutranCid", + "documentation": "E-UTRAN (Evolved Universal Terrestrial Radio Access Network) Cell Global Identifier.
" + }, + "Tac": { + "shape": "TAC", + "documentation": "LTE tracking area code.
" + }, + "LteLocalId": { + "shape": "LteLocalId", + "documentation": "LTE local identification (local ID) information.
" + }, + "LteTimingAdvance": { + "shape": "LteTimingAdvance", + "documentation": "LTE timing advance.
" + }, + "Rsrp": { + "shape": "RSRP", + "documentation": "Signal power of the reference signal received, measured in dBm (decibel-milliwatts).
" + }, + "Rsrq": { + "shape": "RSRQ", + "documentation": "Signal quality of the reference Signal received, measured in decibels (dB).
" + }, + "NrCapable": { + "shape": "NRCapable", + "documentation": "Parameter that determines whether the LTE object is capable of supporting NR (new radio).
" + }, + "LteNmr": { + "shape": "LteNmrList", + "documentation": "LTE object for network measurement reports.
" + } + }, + "documentation": "LTE object.
" + }, + "LteTimingAdvance": { + "type": "integer", + "max": 1282, + "min": 0 + }, + "MCC": { + "type": "integer", + "max": 999, + "min": 200 + }, + "MNC": { + "type": "integer", + "max": 999, + "min": 0 + }, + "MacAddress": { + "type": "string", + "max": 17, + "min": 12, + "pattern": "^([0-9A-Fa-f]{2}[:-]?){5}([0-9A-Fa-f]{2})$" + }, "MacVersion": { "type": "string", "max": 64 @@ -6838,7 +7589,7 @@ }, "WirelessDeviceIdEventTopic": { "shape": "EventNotificationTopicStatus", - "documentation": "Denotes whether the wireless device ID device registration state event topic is enabled or disabled.
" + "documentation": "Denotes whether the wireless device ID message delivery status event topic is enabled or disabled.
" } }, "documentation": "Message delivery status event configuration object for enabling and disabling relevant topics.
" @@ -6951,6 +7702,9 @@ }, "documentation": "Wireless metadata that is to be sent to multicast group.
" }, + "NRCapable": { + "type": "boolean" + }, "NetId": { "type": "string", "documentation": "LoRaWAN network ID.
", @@ -6995,6 +7749,11 @@ }, "documentation": "Network analyzer configurations.
" }, + "NetworkId": { + "type": "integer", + "max": 65535, + "min": 0 + }, "NextToken": { "type": "string", "max": 4096 @@ -7058,6 +7817,16 @@ }, "documentation": "OTAA device object for v1.1
" }, + "PCI": { + "type": "integer", + "max": 503, + "min": 0 + }, + "PSC": { + "type": "integer", + "max": 511, + "min": 0 + }, "PackageVersion": { "type": "string", "max": 32, @@ -7099,12 +7868,22 @@ "Sidewalk" ] }, + "PathLoss": { + "type": "integer", + "max": 158, + "min": 46 + }, "PayloadData": { "type": "string", "documentation": "The binary to be sent to the end device, encoded in base64.
", "max": 2048, "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" }, + "PilotPower": { + "type": "integer", + "max": -49, + "min": -142 + }, "PingSlotDr": { "type": "integer", "max": 15, @@ -7120,6 +7899,11 @@ "max": 4096, "min": 128 }, + "PnOffset": { + "type": "integer", + "max": 511, + "min": 0 + }, "PositionConfigurationFec": { "type": "string", "enum": [ @@ -7234,6 +8018,13 @@ }, "documentation": "The FPorts for the position information.
" }, + "PositioningConfigStatus": { + "type": "string", + "enum": [ + "Enabled", + "Disabled" + ] + }, "PrAllowed": { "type": "boolean" }, @@ -7293,11 +8084,15 @@ "shape": "DestinationName", "documentation": "The position data destination that describes the AWS IoT rule that processes the device's position data for use by AWS IoT Core for LoRaWAN.
" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "PutPositionConfigurationResponse": { "type": "structure", - "members": {} + "members": {}, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "PutResourceLogLevelRequest": { "type": "structure", @@ -7332,6 +8127,26 @@ "documentation": "Query string used to search for wireless devices as part of the bulk associate and disassociate process.
", "max": 4096 }, + "RSCP": { + "type": "integer", + "max": -25, + "min": -120 + }, + "RSRP": { + "type": "integer", + "max": -44, + "min": -140 + }, + "RSRQ": { + "type": "float", + "max": -3, + "min": -19.5 + }, + "RSS": { + "type": "integer", + "max": 0, + "min": -128 + }, "RaAllowed": { "type": "boolean" }, @@ -7339,6 +8154,11 @@ "type": "string", "max": 64 }, + "RegistrationZone": { + "type": "integer", + "max": 4095, + "min": 0 + }, "ReportDevStatusBattery": { "type": "boolean" }, @@ -7419,6 +8239,11 @@ "max": 16700000, "min": 1000000 }, + "RxLevel": { + "type": "integer", + "max": -25, + "min": -110 + }, "SNwkSIntKey": { "type": "string", "pattern": "[a-fA-F0-9]{32}" @@ -7759,7 +8584,7 @@ }, "AckModeRetryDurationSecs": { "shape": "AckModeRetryDurationSecs", - "documentation": "The duration of time in seconds for which you want to retry sending the ACK.
" + "documentation": "The duration of time in seconds to retry sending the ACK.
" } }, "documentation": "Information about a Sidewalk router.
" @@ -7916,6 +8741,16 @@ "SupportsJoin": { "type": "boolean" }, + "SystemId": { + "type": "integer", + "max": 32767, + "min": 1 + }, + "TAC": { + "type": "integer", + "max": 65535, + "min": 0 + }, "Tag": { "type": "structure", "required": [ @@ -7989,6 +8824,122 @@ "max": 100, "min": 0 }, + "TdscdmaList": { + "type": "list", + "member": { + "shape": "TdscdmaObj" + }, + "max": 16, + "min": 1 + }, + "TdscdmaLocalId": { + "type": "structure", + "required": [ + "Uarfcn", + "CellParams" + ], + "members": { + "Uarfcn": { + "shape": "UARFCN", + "documentation": "TD-SCDMA UTRA (Universal Terrestrial Radio Access Network) absolute RF channel number (UARFCN).
" + }, + "CellParams": { + "shape": "CellParams", + "documentation": "Cell parameters for TD-SCDMA.
" + } + }, + "documentation": "TD-SCDMA local identification (local Id) information.
" + }, + "TdscdmaNmrList": { + "type": "list", + "member": { + "shape": "TdscdmaNmrObj" + }, + "max": 32, + "min": 1 + }, + "TdscdmaNmrObj": { + "type": "structure", + "required": [ + "Uarfcn", + "CellParams" + ], + "members": { + "Uarfcn": { + "shape": "UARFCN", + "documentation": "TD-SCDMA UTRA (Universal Terrestrial Radio Access Network) absolute RF channel number.
" + }, + "CellParams": { + "shape": "CellParams", + "documentation": "Cell parameters for TD-SCDMA network measurement reports object.
" + }, + "UtranCid": { + "shape": "UtranCid", + "documentation": "UTRAN (UMTS Terrestrial Radio Access Network) cell global identifier.
" + }, + "Rscp": { + "shape": "RSCP", + "documentation": "Code power of the received signal, measured in decibel-milliwatts (dBm).
" + }, + "PathLoss": { + "shape": "PathLoss", + "documentation": "Path loss, or path attenuation, is the reduction in power density of an electromagnetic wave as it propagates through space.
" + } + }, + "documentation": "TD-SCDMA object for network measurement reports.
" + }, + "TdscdmaObj": { + "type": "structure", + "required": [ + "Mcc", + "Mnc", + "UtranCid" + ], + "members": { + "Mcc": { + "shape": "MCC", + "documentation": "Mobile Country Code.
" + }, + "Mnc": { + "shape": "MNC", + "documentation": "Mobile Network Code.
" + }, + "Lac": { + "shape": "LAC", + "documentation": "Location Area Code.
" + }, + "UtranCid": { + "shape": "UtranCid", + "documentation": "UTRAN (UMTS Terrestrial Radio Access Network) Cell Global Identifier.
" + }, + "TdscdmaLocalId": { + "shape": "TdscdmaLocalId", + "documentation": "TD-SCDMA local identification (local ID) information.
" + }, + "TdscdmaTimingAdvance": { + "shape": "TdscdmaTimingAdvance", + "documentation": "TD-SCDMA Timing advance.
" + }, + "Rscp": { + "shape": "RSCP", + "documentation": "Signal power of the received signal (Received Signal Code Power), measured in decibel-milliwatts (dBm).
" + }, + "PathLoss": { + "shape": "PathLoss", + "documentation": "Path loss, or path attenuation, is the reduction in power density of an electromagnetic wave as it propagates through space.
" + }, + "TdscdmaNmr": { + "shape": "TdscdmaNmrList", + "documentation": "TD-SCDMA object for network measurement reports.
" + } + }, + "documentation": "TD-SCDMA object.
" + }, + "TdscdmaTimingAdvance": { + "type": "integer", + "max": 1530, + "min": 0 + }, "TestWirelessDeviceRequest": { "type": "structure", "required": [ @@ -8040,6 +8991,16 @@ "max": 1, "min": 0 }, + "UARFCN": { + "type": "integer", + "max": 16383, + "min": 0 + }, + "UARFCNDL": { + "type": "integer", + "max": 16383, + "min": 0 + }, "UlBucketSize": { "type": "integer", "max": 2147483647, @@ -8173,6 +9134,10 @@ "Positioning": { "shape": "Positioning", "documentation": "Positioning FPorts for the ClockSync, Stream, and GNSS functions.
" + }, + "Applications": { + "shape": "Applications", + "documentation": "LoRaWAN application, which can be used for geolocation by activating positioning.
" } }, "documentation": "Object for updating the FPorts information.
" @@ -8346,11 +9311,15 @@ "shape": "PositionCoordinate", "documentation": "The position information of the resource.
" } - } + }, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "UpdatePositionResponse": { "type": "structure", - "members": {} + "members": {}, + "deprecated": true, + "deprecatedMessage": "This operation is no longer supported." }, "UpdateResourceEventConfigurationRequest": { "type": "structure", @@ -8403,6 +9372,36 @@ "type": "structure", "members": {} }, + "UpdateResourcePositionRequest": { + "type": "structure", + "required": [ + "ResourceIdentifier", + "ResourceType" + ], + "members": { + "ResourceIdentifier": { + "shape": "PositionResourceIdentifier", + "documentation": "The identifier of the resource for which position information is updated. It can be the wireless device ID or the wireless gateway ID depending on the resource type.
", + "location": "uri", + "locationName": "ResourceIdentifier" + }, + "ResourceType": { + "shape": "PositionResourceType", + "documentation": "The type of resource for which position information is updated, which can be a wireless device or a wireless gateway.
", + "location": "querystring", + "locationName": "resourceType" + }, + "GeoJsonPayload": { + "shape": "GeoJsonPayload", + "documentation": "The position information of the resource, displayed as a JSON payload. The payload uses the GeoJSON format, which a format that's used to encode geographic data structures. For more information, see GeoJSON.
" + } + }, + "payload": "GeoJsonPayload" + }, + "UpdateResourcePositionResponse": { + "type": "structure", + "members": {} + }, "UpdateSignature": { "type": "string", "max": 4096, @@ -8435,6 +9434,10 @@ "LoRaWAN": { "shape": "LoRaWANUpdateDevice", "documentation": "The updated wireless device's configuration.
" + }, + "Positioning": { + "shape": "PositioningConfigStatus", + "documentation": "FPort values for the GNSS, stream, and ClockSync functions of the positioning information.
" } } }, @@ -8510,10 +9513,150 @@ }, "documentation": "UpdateWirelessGatewayTaskEntry object.
" }, + "Use2DSolver": { + "type": "boolean" + }, + "UtranCid": { + "type": "integer", + "max": 268435455, + "min": 0 + }, "VerticalAccuracy": { "type": "float", "min": 0 }, + "WcdmaList": { + "type": "list", + "member": { + "shape": "WcdmaObj" + }, + "max": 16, + "min": 1 + }, + "WcdmaLocalId": { + "type": "structure", + "required": [ + "Uarfcndl", + "Psc" + ], + "members": { + "Uarfcndl": { + "shape": "UARFCNDL", + "documentation": "WCDMA UTRA Absolute RF Channel Number downlink.
" + }, + "Psc": { + "shape": "PSC", + "documentation": "Primary Scrambling Code.
" + } + }, + "documentation": "WCDMA local identification (local ID) information.
" + }, + "WcdmaNmrList": { + "type": "list", + "member": { + "shape": "WcdmaNmrObj" + }, + "max": 32, + "min": 1 + }, + "WcdmaNmrObj": { + "type": "structure", + "required": [ + "Uarfcndl", + "Psc", + "UtranCid" + ], + "members": { + "Uarfcndl": { + "shape": "UARFCNDL", + "documentation": "WCDMA UTRA Absolute RF Channel Number downlink.
" + }, + "Psc": { + "shape": "PSC", + "documentation": "Primary Scrambling Code.
" + }, + "UtranCid": { + "shape": "UtranCid", + "documentation": "UTRAN (UMTS Terrestrial Radio Access Network) Cell Global Identifier.
" + }, + "Rscp": { + "shape": "RSCP", + "documentation": "Received Signal Code Power (signal power) (dBm)
" + }, + "PathLoss": { + "shape": "PathLoss", + "documentation": "Path loss, or path attenuation, is the reduction in power density of an electromagnetic wave as it propagates through space.
" + } + }, + "documentation": "Network Measurement Reports.
" + }, + "WcdmaObj": { + "type": "structure", + "required": [ + "Mcc", + "Mnc", + "UtranCid" + ], + "members": { + "Mcc": { + "shape": "MCC", + "documentation": "Mobile Country Code.
" + }, + "Mnc": { + "shape": "MNC", + "documentation": "Mobile Network Code.
" + }, + "Lac": { + "shape": "LAC", + "documentation": "Location Area Code.
" + }, + "UtranCid": { + "shape": "UtranCid", + "documentation": "UTRAN (UMTS Terrestrial Radio Access Network) Cell Global Identifier.
" + }, + "WcdmaLocalId": { + "shape": "WcdmaLocalId", + "documentation": "WCDMA local ID information.
" + }, + "Rscp": { + "shape": "RSCP", + "documentation": "Received Signal Code Power (signal power) (dBm).
" + }, + "PathLoss": { + "shape": "PathLoss", + "documentation": "Path loss, or path attenuation, is the reduction in power density of an electromagnetic wave as it propagates through space.
" + }, + "WcdmaNmr": { + "shape": "WcdmaNmrList", + "documentation": "WCDMA object for network measurement reports.
" + } + }, + "documentation": "WCDMA.
" + }, + "WiFiAccessPoint": { + "type": "structure", + "required": [ + "MacAddress", + "Rss" + ], + "members": { + "MacAddress": { + "shape": "MacAddress", + "documentation": "Wi-Fi MAC Address.
" + }, + "Rss": { + "shape": "RSS", + "documentation": "Recived signal strength of the WLAN measurement data.
" + } + }, + "documentation": "Wi-Fi access point.
" + }, + "WiFiAccessPoints": { + "type": "list", + "member": { + "shape": "WiFiAccessPoint" + } + }, "WirelessDeviceArn": { "type": "string" }, diff --git a/apis/kendra-2019-02-03.min.json b/apis/kendra-2019-02-03.min.json index c86f68e00f..40f8edd260 100644 --- a/apis/kendra-2019-02-03.min.json +++ b/apis/kendra-2019-02-03.min.json @@ -1773,6 +1773,7 @@ "members": { "Id": {}, "Type": {}, + "Format": {}, "AdditionalAttributes": { "type": "list", "member": { @@ -1789,7 +1790,7 @@ "type": "structure", "members": { "TextWithHighlightsValue": { - "shape": "Sd9" + "shape": "Sda" } } } @@ -1798,10 +1799,10 @@ }, "DocumentId": {}, "DocumentTitle": { - "shape": "Sd9" + "shape": "Sda" }, "DocumentExcerpt": { - "shape": "Sd9" + "shape": "Sda" }, "DocumentURI": {}, "DocumentAttributes": { @@ -1813,12 +1814,46 @@ "ScoreConfidence": {} } }, - "FeedbackToken": {} + "FeedbackToken": {}, + "TableExcerpt": { + "type": "structure", + "members": { + "Rows": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Cells": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Value": {}, + "TopAnswer": { + "type": "boolean" + }, + "Highlighted": { + "type": "boolean" + }, + "Header": { + "type": "boolean" + } + } + } + } + } + } + }, + "TotalNumberOfRows": { + "type": "integer" + } + } + } } } }, "FacetResults": { - "shape": "Sdg" + "shape": "Sdm" }, "TotalNumberOfResults": { "type": "integer" @@ -3580,7 +3615,7 @@ } } }, - "Sd9": { + "Sda": { "type": "structure", "members": { "Text": {}, @@ -3608,7 +3643,7 @@ } } }, - "Sdg": { + "Sdm": { "type": "list", "member": { "type": "structure", @@ -3627,7 +3662,7 @@ "type": "integer" }, "FacetResults": { - "shape": "Sdg" + "shape": "Sdm" } } } diff --git a/apis/kendra-2019-02-03.normal.json b/apis/kendra-2019-02-03.normal.json index 18a462c990..1d7f136383 100644 --- a/apis/kendra-2019-02-03.normal.json +++ b/apis/kendra-2019-02-03.normal.json @@ -2407,7 +2407,7 @@ "members": { "DocumentId": { "shape": "DocumentId", - "documentation": "The unique identifier of the document whose status could not be retrieved.
" + "documentation": "The identifier of the document whose status could not be retrieved.
" }, "ErrorCode": { "shape": "ErrorCode", @@ -2465,7 +2465,7 @@ "members": { "Id": { "shape": "DocumentId", - "documentation": "The unique identifier of the document.
" + "documentation": "The identifier of the document.
" }, "ErrorCode": { "shape": "ErrorCode", @@ -2503,7 +2503,7 @@ }, "SecretArn": { "shape": "SecretArn", - "documentation": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your Box platform. The secret must contain a JSON structure with the following keys:
clientID—The identifier of the client OAuth 2.0 authentication application created in Box.
clientSecret—A set of characters known only to the OAuth 2.0 authentication application created in Box.
publicKeyId—The identifier of the public key contained within an identity certificate.
privateKey—A set of characters that make up an encryption key.
passphrase—A set of characters that act like a password.
You create an application in Box to generate the keys or credentials required for the secret. For more information, see Authentication for a Box data source.
" + "documentation": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your Box platform. The secret must contain a JSON structure with the following keys:
clientID—The identifier of the client OAuth 2.0 authentication application created in Box.
clientSecret—A set of characters known only to the OAuth 2.0 authentication application created in Box.
publicKeyId—The identifier of the public key contained within an identity certificate.
privateKey—A set of characters that make up an encryption key.
passphrase—A set of characters that act like a password.
You create an application in Box to generate the keys or credentials required for the secret. For more information, see Using a Box data source.
" }, "UseChangeLog": { "shape": "Boolean", @@ -2605,7 +2605,7 @@ "members": { "ResultId": { "shape": "ResultId", - "documentation": "The unique identifier of the search result that was clicked.
" + "documentation": "The identifier of the search result that was clicked.
" }, "ClickTime": { "shape": "Timestamp", @@ -2635,7 +2635,7 @@ "members": { "DocumentIdColumnName": { "shape": "ColumnName", - "documentation": "The column that provides the document's unique identifier.
" + "documentation": "The column that provides the document's identifier.
" }, "DocumentDataColumnName": { "shape": "ColumnName", @@ -2805,7 +2805,7 @@ }, "SecretArn": { "shape": "SecretArn", - "documentation": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the Confluence instance. If you use Confluence Cloud, you use a generated API token as the password. For more information, see Using a Confluence data source.
You can also provide authentication credentials in the form of a personal access token. For more information, see Authentication for a Confluence data source.
" + "documentation": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the Confluence instance. If you use Confluence Cloud, you use a generated API token as the password.
You can also provide authentication credentials in the form of a personal access token. For more information, see Using a Confluence data source.
" }, "Version": { "shape": "ConfluenceVersion", @@ -3294,7 +3294,7 @@ "members": { "Id": { "shape": "FaqId", - "documentation": "The unique identifier of the FAQ.
" + "documentation": "The identifier of the FAQ.
" } } }, @@ -3353,7 +3353,7 @@ "members": { "Id": { "shape": "IndexId", - "documentation": "The unique identifier of the index. Use this identifier when you query an index, set up a data source, or index a document.
" + "documentation": "The identifier of the index. Use this identifier when you query an index, set up a data source, or index a document.
" } } }, @@ -3402,7 +3402,7 @@ "members": { "Id": { "shape": "QuerySuggestionsBlockListId", - "documentation": "The unique identifier of the created block list.
" + "documentation": "The identifier of the created block list.
" } } }, @@ -3451,7 +3451,7 @@ "members": { "Id": { "shape": "ThesaurusId", - "documentation": "The unique identifier of the thesaurus.
" + "documentation": "The identifier of the thesaurus.
" } } }, @@ -3644,7 +3644,7 @@ }, "Id": { "shape": "DataSourceId", - "documentation": "The unique identifier for the data source.
" + "documentation": "The identifier for the data source.
" }, "Type": { "shape": "DataSourceType", @@ -3667,7 +3667,7 @@ "documentation": "The code for a language. This shows a supported language for all documents in the data source. English is supported by default. For more information on supported languages, including their codes, see Adding documents in languages other than English.
" } }, - "documentation": "Summary information for an Amazon Kendra data source. Returned in a call to the DescribeDataSource
API.
Summary information for an Amazon Kendra data source.
" }, "DataSourceSummaryList": { "type": "list", @@ -3680,7 +3680,7 @@ "members": { "ExecutionId": { "shape": "String", - "documentation": "A unique identifier for the synchronization job.
" + "documentation": "A identifier for the synchronization job.
" }, "StartTime": { "shape": "Timestamp", @@ -4723,7 +4723,7 @@ "members": { "Id": { "shape": "DocumentId", - "documentation": "A unique identifier of the document in the index.
Note, each document ID must be unique per index. You cannot create a data source to index your documents with their unique IDs and then use the BatchPutDocument
API to index the same documents, or vice versa. You can delete a data source and then use the BatchPutDocument
API to index the same documents, or vice versa.
An identifier of the document in the index.
Note, each document ID must be unique per index. You cannot create a data source to index your documents with their unique IDs and then use the BatchPutDocument
API to index the same documents, or vice versa. You can delete a data source and then use the BatchPutDocument
API to index the same documents, or vice versa.
A list of strings.
" + "documentation": "A list of strings. The default maximum length or number of strings is 10.
" }, "LongValue": { "shape": "Long", @@ -4924,7 +4924,7 @@ "members": { "DocumentId": { "shape": "DocumentId", - "documentation": "The unique identifier of the document.
" + "documentation": "The identifier of the document.
" }, "Attributes": { "shape": "DocumentAttributeList", @@ -5455,7 +5455,7 @@ "members": { "Id": { "shape": "FaqId", - "documentation": "The unique identifier of the FAQ.
" + "documentation": "The identifier of the FAQ.
" }, "Name": { "shape": "FaqName", @@ -5584,7 +5584,7 @@ "members": { "QuerySuggestionsId": { "shape": "QuerySuggestionsId", - "documentation": "The unique identifier for a list of query suggestions for an index.
" + "documentation": "The identifier for a list of query suggestions for an index.
" }, "Suggestions": { "shape": "SuggestionList", @@ -5663,7 +5663,7 @@ }, "SecretArn": { "shape": "SecretArn", - "documentation": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your GitHub. The secret must contain a JSON structure with the following keys:
githubToken—The access token created in GitHub. For more information on creating a token in GitHub, see Authentication for a GitHub data source.
The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs required to connect to your GitHub. The secret must contain a JSON structure with the following keys:
personalToken—The access token created in GitHub. For more information on creating a token in GitHub, see Using a GitHub data source.
The identifier of the index.
" + "documentation": "The name of the index.
" }, "Id": { "shape": "IndexId", - "documentation": "A unique identifier for the index. Use this to identify the index when you are using APIs such as Query
, DescribeIndex
, UpdateIndex
, and DeleteIndex
.
An identifier for the index. Use this to identify the index when you are using APIs such as Query
, DescribeIndex
, UpdateIndex
, and DeleteIndex
.
Indicates whether the index is a enterprise edition index or a developer edition index.
" + "documentation": "Indicates whether the index is a Enterprise Edition index or a Developer Edition index.
" }, "CreatedAt": { "shape": "Timestamp", @@ -6190,7 +6190,7 @@ }, "SecretArn": { "shape": "SecretArn", - "documentation": "The Amazon Resource Name (ARN) of a secret in Secrets Manager contains the key-value pairs required to connect to your Jira data source. The secret must contain a JSON structure with the following keys:
jiraId—The Jira username.
jiraCredentials—The Jira API token. For more information on creating an API token in Jira, see Authentication for a Jira data source.
The Amazon Resource Name (ARN) of a secret in Secrets Manager that contains the key-value pairs required to connect to your Jira data source. The secret must contain a JSON structure with the following keys:
jiraId—The Jira username.
jiraCredentials—The Jira API token. For more information on creating an API token in Jira, see Using a Jira data source.
If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of data source connectors (DataSourceSummaryItems
).
If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of data source connectors.
" }, "MaxResults": { "shape": "MaxResultsIntegerForListDataSourcesRequest", @@ -6634,11 +6634,11 @@ "members": { "NextToken": { "shape": "NextToken", - "documentation": "If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of indexes (DataSourceSummaryItems
).
If the previous response was incomplete (because there is more data to retrieve), Amazon Kendra returns a pagination token in the response. You can use this pagination token to retrieve the next set of indexes.
" }, "MaxResults": { "shape": "MaxResultsIntegerForListIndicesRequest", - "documentation": "The maximum number of data sources to return.
" + "documentation": "The maximum number of indices to return.
" } } }, @@ -7229,11 +7229,11 @@ "members": { "IndexId": { "shape": "IndexId", - "documentation": "The unique identifier of the index to search. The identifier is returned in the response from the CreateIndex
API.
The identifier of the index to search. The identifier is returned in the response from the CreateIndex
API.
The text to search for.
" + "documentation": "The input query text for the search. Amazon Kendra truncates queries at 30 token words, which excludes punctuation and stop words. Truncation still applies if you use Boolean or more advanced, complex queries.
" }, "AttributeFilter": { "shape": "AttributeFilter", @@ -7286,7 +7286,7 @@ "members": { "QueryId": { "shape": "QueryId", - "documentation": "The unique identifier for the search. You use QueryId
to identify the search when using the feedback API.
The identifier for the search. You use QueryId
to identify the search when using the feedback API.
The unique identifier for the query result.
" + "documentation": "The identifier for the query result.
" }, "Type": { "shape": "QueryResultType", - "documentation": "The type of document.
" + "documentation": "The type of document within the response. For example, a response could include a question-answer that's relevant to the query.
" + }, + "Format": { + "shape": "QueryResultFormat", + "documentation": "If the Type
of document within the response is ANSWER
, then it is either a TABLE
answer or TEXT
answer. If it's a table answer, a table excerpt is returned in TableExcerpt
. If it's a text answer, a text excerpt is returned in DocumentExcerpt
.
The unique identifier for the document.
" + "documentation": "The identifier for the document.
" }, "DocumentTitle": { "shape": "TextWithHighlights", @@ -7351,7 +7362,11 @@ }, "FeedbackToken": { "shape": "FeedbackToken", - "documentation": "A token that identifies a particular result from a particular query. Use this token to provide click-through feedback for the result. For more information, see Submitting feedback .
" + "documentation": "A token that identifies a particular result from a particular query. Use this token to provide click-through feedback for the result. For more information, see Submitting feedback .
" + }, + "TableExcerpt": { + "shape": "TableExcerpt", + "documentation": "An excerpt from a table within a document.
" } }, "documentation": "A single query result.
A query result contains information about a document returned by the query. This includes the original location of the document, a list of attributes assigned to the document, and relevant text from the document that satisfies the query.
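A short sketch of how the new `Format` and `TableExcerpt` fields in this result shape might be consumed with the JavaScript SDK; the index ID and query text are placeholders, and the traversal below relies only on the field names defined in this model (`Format`, `TableExcerpt`, `Rows`, `Cells`, `TotalNumberOfRows`).

```javascript
// Sketch: reading table answers from a Kendra Query response.
const AWS = require('aws-sdk');

const kendra = new AWS.Kendra({ region: 'us-east-1' });

async function showTableAnswers() {
  const resp = await kendra.query({
    IndexId: 'example-index-id',                 // placeholder
    QueryText: 'quarterly revenue by region'     // placeholder
  }).promise();

  for (const item of resp.ResultItems || []) {
    // Format is only set for ANSWER results: either TABLE or TEXT.
    if (item.Type === 'ANSWER' && item.Format === 'TABLE' && item.TableExcerpt) {
      console.log(`Table excerpt (${item.TableExcerpt.TotalNumberOfRows} rows in the source table):`);
      for (const row of item.TableExcerpt.Rows || []) {
        const cells = (row.Cells || []).map(cell =>
          cell.TopAnswer ? `*${cell.Value}*` : cell.Value);
        console.log(cells.join(' | '));
      }
    }
  }
}

showTableAnswers().catch(console.error);
```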
" @@ -7459,7 +7474,7 @@ }, "SecretArn": { "shape": "SecretArn", - "documentation": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs that are required to connect to your Quip. The secret must contain a JSON structure with the following keys:
accessToken—The token created in Quip. For more information, see Authentication for a Quip data source.
The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the key-value pairs that are required to connect to your Quip. The secret must contain a JSON structure with the following keys:
accessToken—The token created in Quip. For more information, see Using a Quip data source.
The unique identifier of the search result that the user provided relevance feedback for.
" + "documentation": "The identifier of the search result that the user provided relevance feedback for.
" }, "RelevanceValue": { "shape": "RelevanceType", @@ -8053,7 +8068,7 @@ }, "SecretArn": { "shape": "SecretArn", - "documentation": "The Amazon Resource Name (ARN) of the Secrets Manager secret that contains the user name and password required to connect to the ServiceNow instance. You can also provide OAuth authentication credentials of user name, password, client ID, and client secret. For more information, see Authentication for a ServiceNow data source.
" + "documentation": "The Amazon Resource Name (ARN) of the Secrets Manager secret that contains the user name and password required to connect to the ServiceNow instance. You can also provide OAuth authentication credentials of user name, password, client ID, and client secret. For more information, see Using a ServiceNow data source.
" }, "ServiceNowBuildVersion": { "shape": "ServiceNowBuildVersionType", @@ -8174,7 +8189,7 @@ }, "SecretArn": { "shape": "SecretArn", - "documentation": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the SharePoint instance. If you use SharePoint Server, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source.
You can also provide OAuth authentication credentials of user name, password, client ID, and client secret. For more information, see Authentication for a SharePoint data source.
" + "documentation": "The Amazon Resource Name (ARN) of an Secrets Manager secret that contains the user name and password required to connect to the SharePoint instance. If you use SharePoint Server, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source.
You can also provide OAuth authentication credentials of user name, password, client ID, and client secret. For more information, see Using a SharePoint data source.
" }, "CrawlAttachments": { "shape": "Boolean", @@ -8500,7 +8515,7 @@ "members": { "DocumentId": { "shape": "DocumentId", - "documentation": "The unique identifier of the document.
" + "documentation": "The identifier of the document.
" }, "DocumentStatus": { "shape": "DocumentStatus", @@ -8598,11 +8613,11 @@ "members": { "Id": { "shape": "ResultId", - "documentation": "The unique UUID (universally unique identifier) of a single query suggestion.
" + "documentation": "The UUID (universally unique identifier) of a single query suggestion.
" }, "Value": { "shape": "SuggestionValue", - "documentation": "The value for the unique UUID (universally unique identifier) of a single query suggestion.
The value is the text string of a suggestion.
" + "documentation": "The value for the UUID (universally unique identifier) of a single query suggestion.
The value is the text string of a suggestion.
" } }, "documentation": "A single query suggestion.
" @@ -8661,12 +8676,70 @@ }, "documentation": "The SuggestionTextWithHighlights
structure information.
The actual value or content within a table cell. A table cell could contain a date value of a year, or a string value of text, for example.
" + }, + "TopAnswer": { + "shape": "Boolean", + "documentation": " TRUE
if the response of the table cell is the top answer. This is the cell value or content with the highest confidence score or is the most relevant to the query.
TRUE
means that the table cell has a high enough confidence and is relevant to the query, so the value or content should be highlighted.
TRUE
means that the table cell should be treated as a header.
Provides information about a table cell in a table excerpt.
" + }, + "TableCellList": { + "type": "list", + "member": { + "shape": "TableCell" + } + }, + "TableExcerpt": { + "type": "structure", + "members": { + "Rows": { + "shape": "TableRowList", + "documentation": "A list of rows in the table excerpt.
" + }, + "TotalNumberOfRows": { + "shape": "Integer", + "documentation": "A count of the number of rows in the original table within the document.
" + } + }, + "documentation": "An excerpt from a table within a document. The table excerpt displays up to five columns and three rows, depending on how many table cells are relevant to the query and how many columns are available in the original table. The top most relevant cell is displayed in the table excerpt, along with the next most relevant cells.
" + }, "TableName": { "type": "string", "max": 100, "min": 1, "pattern": "^[a-zA-Z][a-zA-Z0-9_]*$" }, + "TableRow": { + "type": "structure", + "members": { + "Cells": { + "shape": "TableCellList", + "documentation": "A list of table cells in a row.
" + } + }, + "documentation": "Information about a row in a table excerpt.
" + }, + "TableRowList": { + "type": "list", + "member": { + "shape": "TableRow" + } + }, "Tag": { "type": "structure", "required": [ diff --git a/apis/logs-2014-03-28.min.json b/apis/logs-2014-03-28.min.json index fff333276b..87015f7869 100644 --- a/apis/logs-2014-03-28.min.json +++ b/apis/logs-2014-03-28.min.json @@ -94,6 +94,17 @@ } } }, + "DeleteDataProtectionPolicy": { + "input": { + "type": "structure", + "required": [ + "logGroupIdentifier" + ], + "members": { + "logGroupIdentifier": {} + } + } + }, "DeleteDestination": { "input": { "type": "structure", @@ -210,7 +221,7 @@ "destinations": { "type": "list", "member": { - "shape": "S11" + "shape": "S13" } }, "nextToken": {} @@ -277,10 +288,18 @@ "input": { "type": "structure", "members": { + "accountIdentifiers": { + "type": "list", + "member": {} + }, "logGroupNamePrefix": {}, + "logGroupNamePattern": {}, "nextToken": {}, "limit": { "type": "integer" + }, + "includeLinkedAccounts": { + "type": "boolean" } } }, @@ -306,7 +325,8 @@ "storedBytes": { "type": "long" }, - "kmsKeyId": {} + "kmsKeyId": {}, + "dataProtectionStatus": {} } } }, @@ -322,6 +342,7 @@ ], "members": { "logGroupName": {}, + "logGroupIdentifier": {}, "logStreamNamePrefix": {}, "orderBy": {}, "descending": { @@ -393,7 +414,7 @@ "filterName": {}, "filterPattern": {}, "metricTransformations": { - "shape": "S1z" + "shape": "S26" }, "creationTime": { "type": "long" @@ -466,7 +487,7 @@ "type": "long" }, "logGroupNames": { - "shape": "S2l" + "shape": "S2s" } } } @@ -491,7 +512,7 @@ "resourcePolicies": { "type": "list", "member": { - "shape": "S2p" + "shape": "S2w" } }, "nextToken": {} @@ -556,6 +577,7 @@ ], "members": { "logGroupName": {}, + "logGroupIdentifier": {}, "logStreamNames": { "type": "list", "member": {} @@ -576,6 +598,9 @@ "deprecated": true, "deprecatedMessage": "Starting on June 17, 2019, this parameter will be ignored and the value will be assumed to be true. 
The response from this operation will always interleave events from multiple log streams within a log group.", "type": "boolean" + }, + "unmask": { + "type": "boolean" } } }, @@ -615,6 +640,27 @@ } } }, + "GetDataProtectionPolicy": { + "input": { + "type": "structure", + "required": [ + "logGroupIdentifier" + ], + "members": { + "logGroupIdentifier": {} + } + }, + "output": { + "type": "structure", + "members": { + "logGroupIdentifier": {}, + "policyDocument": {}, + "lastUpdatedTime": { + "type": "long" + } + } + } + }, "GetLogEvents": { "input": { "type": "structure", @@ -624,6 +670,7 @@ ], "members": { "logGroupName": {}, + "logGroupIdentifier": {}, "logStreamName": {}, "startTime": { "type": "long" @@ -637,6 +684,9 @@ }, "startFromHead": { "type": "boolean" + }, + "unmask": { + "type": "boolean" } } }, @@ -673,7 +723,8 @@ "logGroupName": {}, "time": { "type": "long" - } + }, + "logGroupIdentifier": {} } }, "output": { @@ -701,7 +752,10 @@ "logRecordPointer" ], "members": { - "logRecordPointer": {} + "logRecordPointer": {}, + "unmask": { + "type": "boolean" + } } }, "output": { @@ -803,6 +857,29 @@ "deprecated": true, "deprecatedMessage": "Please use the generic tagging API ListTagsForResource" }, + "PutDataProtectionPolicy": { + "input": { + "type": "structure", + "required": [ + "logGroupIdentifier", + "policyDocument" + ], + "members": { + "logGroupIdentifier": {}, + "policyDocument": {} + } + }, + "output": { + "type": "structure", + "members": { + "logGroupIdentifier": {}, + "policyDocument": {}, + "lastUpdatedTime": { + "type": "long" + } + } + } + }, "PutDestination": { "input": { "type": "structure", @@ -824,7 +901,7 @@ "type": "structure", "members": { "destination": { - "shape": "S11" + "shape": "S13" } } } @@ -910,7 +987,7 @@ "filterName": {}, "filterPattern": {}, "metricTransformations": { - "shape": "S1z" + "shape": "S26" } } } @@ -926,7 +1003,7 @@ "name": {}, "queryDefinitionId": {}, "logGroupNames": { - "shape": "S2l" + "shape": "S2s" }, "queryString": {} } @@ -950,7 +1027,7 @@ "type": "structure", "members": { "resourcePolicy": { - "shape": "S2p" + "shape": "S2w" } } } @@ -1000,7 +1077,11 @@ "members": { "logGroupName": {}, "logGroupNames": { - "shape": "S2l" + "shape": "S2s" + }, + "logGroupIdentifiers": { + "type": "list", + "member": {} }, "startTime": { "type": "long" @@ -1155,7 +1236,7 @@ "key": {}, "value": {} }, - "S11": { + "S13": { "type": "structure", "members": { "destinationName": {}, @@ -1168,7 +1249,7 @@ } } }, - "S1z": { + "S26": { "type": "list", "member": { "type": "structure", @@ -1193,11 +1274,11 @@ } } }, - "S2l": { + "S2s": { "type": "list", "member": {} }, - "S2p": { + "S2w": { "type": "structure", "members": { "policyName": {}, diff --git a/apis/logs-2014-03-28.normal.json b/apis/logs-2014-03-28.normal.json index 6df4bc52cf..fff76304fe 100644 --- a/apis/logs-2014-03-28.normal.json +++ b/apis/logs-2014-03-28.normal.json @@ -35,7 +35,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Associates the specified Key Management Service customer master key (CMK) with the specified log group.
Associating an KMS CMK with a log group overrides any existing associations between the log group and a CMK. After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
CloudWatch Logs supports only symmetric CMKs. Do not use an associate an asymmetric CMK with your log group. For more information, see Using Symmetric and Asymmetric Keys.
It can take up to 5 minutes for this operation to take effect.
If you attempt to associate a CMK with a log group but the CMK does not exist or the CMK is disabled, you receive an InvalidParameterException
error.
Associates the specified KMS key with the specified log group.
Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.
It can take up to 5 minutes for this operation to take effect.
If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask
operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
Exporting log data to Amazon S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING
or PENDING
) export task at a time. To cancel an export task, use CancelExportTask.
You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log fild data by using Linux utilities.
Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask
operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING
or PENDING
) export task at a time. To cancel an export task, use CancelExportTask.
You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
Creates a log group with the specified name. You can create up to 20,000 log groups per account.
You must use the following guidelines when naming a log group:
Log group names must be unique within a region for an Amazon Web Services account.
Log group names can be between 1 and 512 characters long.
Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)
When you create a log group, by default the log events in the log group never expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.
If you associate a Key Management Service customer master key (CMK) with the log group, ingested data is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
If you attempt to associate a CMK with the log group but the CMK does not exist or the CMK is disabled, you receive an InvalidParameterException
error.
CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric CMK with your log group. For more information, see Using Symmetric and Asymmetric Keys.
Creates a log group with the specified name. You can create up to 20,000 log groups per account.
You must use the following guidelines when naming a log group:
Log group names must be unique within a Region for an Amazon Web Services account.
Log group names can be between 1 and 512 characters long.
Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)
When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.
If you associate a KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.
Creates a log stream for the specified log group. A log stream is a sequence of log events that originate from a single source, such as an application instance or a resource that is being monitored.
There is no limit on the number of log streams that you can create for a log group. There is a limit of 50 TPS on CreateLogStream
operations, after which transactions are throttled.
You must use the following guidelines when naming a log stream:
Log stream names must be unique within the log group.
Log stream names can be between 1 and 512 characters long.
The ':' (colon) and '*' (asterisk) characters are not allowed.
Creates a log stream for the specified log group. A log stream is a sequence of log events that originate from a single source, such as an application instance or a resource that is being monitored.
There is no limit on the number of log streams that you can create for a log group. There is a limit of 50 TPS on CreateLogStream
operations, after which transactions are throttled.
You must use the following guidelines when naming a log stream:
Log stream names must be unique within the log group.
Log stream names can be between 1 and 512 characters long.
Don't use ':' (colon) or '*' (asterisk) characters.
Deletes the data protection policy from the specified log group.
For more information about data protection policies, see PutDataProtectionPolicy.
" }, "DeleteDestination": { "name": "DeleteDestination", @@ -410,7 +435,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.
CloudWatch Logs doesn’t support IAM policies that control access to the DescribeLogGroups
action by using the aws:ResourceTag/key-name
condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name
condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.
Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.
CloudWatch Logs doesn’t support IAM policies that control access to the DescribeLogGroups
action by using the aws:ResourceTag/key-name
condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name
condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
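As a rough sketch, the new request fields modeled in this change (accountIdentifiers, logGroupNamePattern, includeLinkedAccounts) might be used like this from a monitoring account; the account ID and name pattern below are placeholders.

```javascript
// Sketch: listing log groups across linked source accounts from a monitoring account.
const AWS = require('aws-sdk');

const logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

logs.describeLogGroups({
  logGroupNamePattern: 'payments',      // match on the log group name
  includeLinkedAccounts: true,          // also return log groups from linked source accounts
  accountIdentifiers: ['111122223333']  // placeholder source account ID
}, (err, data) => {
  if (err) return console.error(err);
  for (const group of data.logGroups || []) {
    console.log(group.logGroupName, group.dataProtectionStatus || 'no data protection policy');
  }
});
```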
" }, "DescribeLogStreams": { "name": "DescribeLogStreams", @@ -435,7 +460,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.
This operation has a limit of five transactions per second, after which transactions are throttled.
" + "documentation": "Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.
This operation has a limit of five transactions per second, after which transactions are throttled.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
" }, "DescribeMetricFilters": { "name": "DescribeMetricFilters", @@ -485,7 +510,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Returns a list of CloudWatch Logs Insights queries that are scheduled, executing, or have been executed recently in this account. You can request all queries or limit it to queries of a specific log group or queries with a certain status.
" + "documentation": "Returns a list of CloudWatch Logs Insights queries that are scheduled, running, or have been run recently in this account. You can request all queries or limit it to queries of a specific log group or queries with a certain status.
" }, "DescribeQueryDefinitions": { "name": "DescribeQueryDefinitions", @@ -579,7 +604,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Disassociates the associated Key Management Service customer master key (CMK) from the specified log group.
After the KMS CMK is disassociated from the log group, CloudWatch Logs stops encrypting newly ingested data for the log group. All previously ingested data remains encrypted, and CloudWatch Logs requires permissions for the CMK whenever the encrypted data is requested.
Note that it can take up to 5 minutes for this operation to take effect.
" + "documentation": "Disassociates the associated KMS key from the specified log group.
After the KMS key is disassociated from the log group, CloudWatch Logs stops encrypting newly ingested data for the log group. All previously ingested data remains encrypted, and CloudWatch Logs requires permissions for the KMS key whenever the encrypted data is requested.
Note that it can take up to 5 minutes for this operation to take effect.
" }, "FilterLogEvents": { "name": "FilterLogEvents", @@ -604,7 +629,35 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.
You must have the logs;FilterLogEvents
permission to perform this operation.
By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the time range that you specify. If the results include a token, then there are more log events available, and you can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token.
The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents
request.
Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.
You must have the logs;FilterLogEvents
permission to perform this operation.
By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token.
The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents
request.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
" + }, + "GetDataProtectionPolicy": { + "name": "GetDataProtectionPolicy", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "GetDataProtectionPolicyRequest" + }, + "output": { + "shape": "GetDataProtectionPolicyResponse" + }, + "errors": [ + { + "shape": "InvalidParameterException" + }, + { + "shape": "OperationAbortedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ServiceUnavailableException" + } + ], + "documentation": "Returns information about a log group data protection policy.
" }, "GetLogEvents": { "name": "GetLogEvents", @@ -629,7 +682,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Lists log events from the specified log stream. You can list all of the log events or filter using a time range.
By default, this operation returns as many log events as can fit in a response size of 1MB (up to 10,000 log events). You can get additional log events by specifying one of the tokens in a subsequent call. This operation can return empty results while there are more log events available through the token.
" + "documentation": "Lists log events from the specified log stream. You can list all of the log events or filter using a time range.
By default, this operation returns as many log events as can fit in a response size of 1MB (up to 10,000 log events). You can get additional log events by specifying one of the tokens in a subsequent call. This operation can return empty results while there are more log events available through the token.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
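A minimal sketch of the new logGroupIdentifier and unmask parameters from this model in use; the ARN and stream name are placeholders, and viewing unmasked values also requires the logs:Unmask permission.

```javascript
// Sketch: reading events from a source-account log group and requesting unmasked data.
const AWS = require('aws-sdk');

const logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

logs.getLogEvents({
  // The log group is addressed here by ARN; a plain name also works for same-account groups.
  logGroupIdentifier: 'arn:aws:logs:us-east-1:111122223333:log-group:/app/payments', // placeholder
  logStreamName: 'instance-1',                                                        // placeholder
  startFromHead: true,
  unmask: true
}, (err, data) => {
  if (err) return console.error(err);
  for (const event of data.events || []) {
    console.log(new Date(event.timestamp).toISOString(), event.message);
  }
});
```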
" }, "GetLogGroupFields": { "name": "GetLogGroupFields", @@ -657,7 +710,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Returns a list of the fields that are included in log events in the specified log group, along with the percentage of log events that contain each field. The search is limited to a time period that you specify.
In the results, fields that start with @ are fields generated by CloudWatch Logs. For example, @timestamp
is the timestamp of each log event. For more information about the fields that are generated by CloudWatch logs, see Supported Logs and Discovered Fields.
The response results are sorted by the frequency percentage, starting with the highest percentage.
" + "documentation": "Returns a list of the fields that are included in log events in the specified log group. Includes the percentage of log events that contain each field. The search is limited to a time period that you specify.
In the results, fields that start with @
are fields generated by CloudWatch Logs. For example, @timestamp
is the timestamp of each log event. For more information about the fields that are generated by CloudWatch logs, see Supported Logs and Discovered Fields.
The response results are sorted by the frequency percentage, starting with the highest percentage.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
" }, "GetLogRecord": { "name": "GetLogRecord", @@ -710,7 +763,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Returns the results from the specified query.
Only the fields requested in the query are returned, along with a @ptr
field, which is the identifier for the log record. You can use the value of @ptr
in a GetLogRecord operation to get the full log record.
GetQueryResults
does not start a query execution. To run a query, use StartQuery.
If the value of the Status
field in the output is Running
, this operation returns only partial results. If you see a value of Scheduled
or Running
for the status, you can retry the operation later to see the final results.
Returns the results from the specified query.
Only the fields requested in the query are returned, along with a @ptr
field, which is the identifier for the log record. You can use the value of @ptr
in a GetLogRecord operation to get the full log record.
GetQueryResults
does not start running a query. To run a query, use StartQuery.
If the value of the Status
field in the output is Running
, this operation returns only partial results. If you see a value of Scheduled
or Running
for the status, you can retry the operation later to see the final results.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start queries in linked source accounts. For more information, see CloudWatch cross-account observability.
" }, "ListTagsForResource": { "name": "ListTagsForResource", @@ -761,6 +814,37 @@ "deprecated": true, "deprecatedMessage": "Please use the generic tagging API ListTagsForResource" }, + "PutDataProtectionPolicy": { + "name": "PutDataProtectionPolicy", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "PutDataProtectionPolicyRequest" + }, + "output": { + "shape": "PutDataProtectionPolicyResponse" + }, + "errors": [ + { + "shape": "InvalidParameterException" + }, + { + "shape": "LimitExceededException" + }, + { + "shape": "OperationAbortedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ServiceUnavailableException" + } + ], + "documentation": "Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data.
Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked.
By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask
permission can use a GetLogEvents or FilterLogEvents operation with the unmask
parameter set to true
to view the unmasked log events. Users with the logs:Unmask
can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask
query command.
For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
" + }, "PutDestination": { "name": "PutDestination", "http": { @@ -784,7 +868,7 @@ "shape": "ServiceUnavailableException" } ], - "documentation": "Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.
A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents.
Through an access policy, a destination controls what is written to it. By default, PutDestination
does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination
.
To perform a PutDestination
operation, you must also have the iam:PassRole
permission.
Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.
A destination encapsulates a physical resource (such as an Amazon Kinesis stream). With a destination, you can subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents.
Through an access policy, a destination controls what is written to it. By default, PutDestination
does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination
.
To perform a PutDestination
operation, you must also have the iam:PassRole
permission.
Uploads a batch of log events to the specified log stream.
You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token in the expectedSequenceToken
field from InvalidSequenceTokenException
. If you call PutLogEvents
twice within a narrow time period using the same value for sequenceToken
, both calls might be successful or one might be rejected.
The batch of events must satisfy the following constraints:
The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
None of the log events in the batch can be more than 2 hours in the future.
None of the log events in the batch can be older than 14 days or older than the retention period of the log group.
The log events in the batch must be in chronological order by their timestamp. The timestamp is the time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)
A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.
The maximum number of log events in a batch is 10,000.
There is a quota of 5 requests per second per log stream. Additional requests are throttled. This quota can't be changed.
If a call to PutLogEvents
returns \"UnrecognizedClientException\" the most likely cause is an invalid Amazon Web Services access key ID or secret key.
Uploads a batch of log events to the specified log stream.
You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token in the expectedSequenceToken
field from InvalidSequenceTokenException
. If you call PutLogEvents
twice within a narrow time period using the same value for sequenceToken
, both calls might be successful or one might be rejected.
The batch of events must satisfy the following constraints:
The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
None of the log events in the batch can be more than 2 hours in the future.
None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group.
The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss
. For example, 2017-09-15T13:45:30
.)
A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.
The maximum number of log events in a batch is 10,000.
There is a quota of five requests per second per log stream. Additional requests are throttled. This quota can't be changed.
If a call to PutLogEvents
returns \"UnrecognizedClientException\" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.
Creates or updates a metric filter and associates it with the specified log group. Metric filters allow you to configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
To help prevent accidental high charges, Amazon disables a metric filter if it generates 1000 different name/value pairs for the dimensions that you have specified within a certain amount of time.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs disables a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within a certain amount of time. This helps to prevent accidental high charges.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a query definition for CloudWatch Logs Insights. For more information, see Analyzing Log Data with CloudWatch Logs Insights.
To update a query definition, specify its queryDefinitionId
in your request. The values of name
, queryString
, and logGroupNames
are changed to the values that you specify in your update operation. No current values are retained from the current query definition. For example, if you update a current query definition that includes log groups, and you don't specify the logGroupNames
parameter in your update operation, the query definition changes to contain no log groups.
You must have the logs:PutQueryDefinition
permission to be able to perform this operation.
Creates or updates a query definition for CloudWatch Logs Insights. For more information, see Analyzing Log Data with CloudWatch Logs Insights.
To update a query definition, specify its queryDefinitionId
in your request. The values of name
, queryString
, and logGroupNames
are changed to the values that you specify in your update operation. No current values are retained from the current query definition. For example, imagine updating a current query definition that includes log groups. If you don't specify the logGroupNames
parameter in your update operation, the query definition changes to contain no log groups.
You must have the logs:PutQueryDefinition
permission to be able to perform this operation.
Sets the retention of the specified log group. A retention policy allows you to configure the number of days for which to retain log events in the specified log group.
CloudWatch Logs doesn’t immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.
This means that if you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven’t been actually deleted, those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours has passed after the end of the previous retention period, or you have confirmed that the older log events are deleted.
Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group.
CloudWatch Logs doesn’t immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.
To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven’t been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted.
Creates or updates a subscription filter and associates it with the specified log group. Subscription filters allow you to subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the gzip format.
The following destinations are supported for subscription filters:
An Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination that belongs to a different account, for cross-account delivery.
An Amazon Kinesis Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
An Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName
.
To perform a PutSubscriptionFilter
operation, you must also have the iam:PassRole
permission.
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination that belongs to a different account, for cross-account delivery.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName
.
To perform a PutSubscriptionFilter
operation, you must also have the iam:PassRole
permission.
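A minimal sketch of the operation above in the AWS SDK for JavaScript (v2), sending matched events to a Kinesis data stream. The stream ARN, role ARN, filter name, and pattern are placeholder assumptions; the role is passed because the caller also needs iam:PassRole.

```js
// Sketch: subscribe a log group to a Kinesis data stream (ARNs and names are placeholders).
const AWS = require('aws-sdk');
const logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

logs.putSubscriptionFilter({
  logGroupName: '/my-app/example',
  filterName: 'example-filter',                // must match the existing name when updating
  filterPattern: '{ $.level = "ERROR" }',      // an empty string would match all events
  destinationArn: 'arn:aws:kinesis:us-east-1:123456789012:stream/example-stream',
  roleArn: 'arn:aws:iam::123456789012:role/CWLtoKinesisRole' // caller also needs iam:PassRole
}).promise()
  .then(() => console.log('Subscription filter created'))
  .catch(err => console.error(err));
```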
Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.
For more information, see CloudWatch Logs Insights Query Syntax.
Queries time out after 15 minutes of execution. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.
You are limited to 20 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.
" + "documentation": "Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.
For more information, see CloudWatch Logs Insights Query Syntax.
Queries time out after 15 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery
operation, the query definition must be defined in the monitoring account.
You can have up to 20 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.
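For orientation, here is a hedged sketch of starting a Logs Insights query and polling for its results with the AWS SDK for JavaScript (v2); the log group, query string, and time range are placeholder assumptions.

```js
// Sketch: run a Logs Insights query and poll until it finishes (placeholder log group and query).
const AWS = require('aws-sdk');
const logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

async function runQuery() {
  const now = Math.floor(Date.now() / 1000);   // StartQuery times are epoch seconds
  const { queryId } = await logs.startQuery({
    logGroupName: '/my-app/example',
    startTime: now - 3600,
    endTime: now,
    queryString: 'fields @timestamp, @message | sort @timestamp desc | limit 20'
  }).promise();

  // Poll until the query leaves the Running/Scheduled states (queries time out after 15 minutes).
  let result;
  do {
    await new Promise(resolve => setTimeout(resolve, 2000));
    result = await logs.getQueryResults({ queryId }).promise();
  } while (result.status === 'Running' || result.status === 'Scheduled');

  return result.results;
}

runQuery().then(console.log).catch(console.error);
```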
" }, "StopQuery": { "name": "StopQuery", @@ -1146,6 +1230,20 @@ "type": "string", "min": 1 }, + "AccountId": { + "type": "string", + "max": 12, + "min": 12, + "pattern": "^\\d{12}$" + }, + "AccountIds": { + "type": "list", + "member": { + "shape": "AccountId" + }, + "max": 20, + "min": 0 + }, "AmazonResourceName": { "type": "string", "max": 1011, @@ -1168,7 +1266,7 @@ }, "kmsKeyId": { "shape": "KmsKeyId", - "documentation": "The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. This must be a symmetric CMK. For more information, see Amazon Resource Names - Key Management Service and Using Symmetric and Asymmetric Keys.
" + "documentation": "The Amazon Resource Name (ARN) of the KMS key to use when encrypting log data. This must be a symmetric KMS key. For more information, see Amazon Resource Names and Using Symmetric and Asymmetric Keys.
" } } }, @@ -1207,15 +1305,15 @@ }, "from": { "shape": "Timestamp", - "documentation": "The start time of the range for the request, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp earlier than this time are not exported.
" + "documentation": "The start time of the range for the request, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. Events with a timestamp earlier than this time are not exported.
The end time of the range for the request, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not exported.
You must specify a time that is not earlier than when this log group was created.
" + "documentation": "The end time of the range for the request, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. Events with a timestamp later than this time are not exported.
You must specify a time that is not earlier than when this log group was created.
" }, "destination": { "shape": "ExportDestinationBucket", - "documentation": "The name of S3 bucket for the exported log data. The bucket must be in the same Amazon Web Services region.
" + "documentation": "The name of S3 bucket for the exported log data. The bucket must be in the same Amazon Web Services Region.
" }, "destinationPrefix": { "shape": "ExportDestinationPrefix", @@ -1244,7 +1342,7 @@ }, "kmsKeyId": { "shape": "KmsKeyId", - "documentation": "The Amazon Resource Name (ARN) of the CMK to use when encrypting log data. For more information, see Amazon Resource Names - Key Management Service.
" + "documentation": "The Amazon Resource Name (ARN) of the KMS key to use when encrypting log data. For more information, see Amazon Resource Names.
" }, "tags": { "shape": "Tags", @@ -1269,13 +1367,37 @@ } } }, + "DataProtectionPolicyDocument": { + "type": "string" + }, + "DataProtectionStatus": { + "type": "string", + "enum": [ + "ACTIVATED", + "DELETED", + "ARCHIVED", + "DISABLED" + ] + }, "Days": { "type": "integer", - "documentation": "The number of days to retain the log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, and 3653.
To set a log group to never have log events expire, use DeleteRetentionPolicy.
" + "documentation": "The number of days to retain the log events in the specified log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 2192, 2557, 2922, 3288, and 3653.
To set a log group so that its log events do not expire, use DeleteRetentionPolicy.
" }, "DefaultValue": { "type": "double" }, + "DeleteDataProtectionPolicyRequest": { + "type": "structure", + "required": [ + "logGroupIdentifier" + ], + "members": { + "logGroupIdentifier": { + "shape": "LogGroupIdentifier", + "documentation": "The name or ARN of the log group that you want to delete the data protection policy for.
" + } + } + }, "DeleteDestinationRequest": { "type": "structure", "required": [ @@ -1430,7 +1552,7 @@ "members": { "taskId": { "shape": "ExportTaskId", - "documentation": "The ID of the export task. Specifying a task ID filters the results to zero or one export tasks.
" + "documentation": "The ID of the export task. Specifying a task ID filters the results to one or zero export tasks.
" }, "statusCode": { "shape": "ExportTaskStatusCode", @@ -1466,9 +1588,17 @@ "DescribeLogGroupsRequest": { "type": "structure", "members": { + "accountIdentifiers": { + "shape": "AccountIds", + "documentation": "When includeLinkedAccounts
is set to True
, use this parameter to specify the list of accounts to search. You can specify as many as 20 account IDs in the array.
The prefix to match.
" + "documentation": "The prefix to match.
logGroupNamePrefix
and logGroupNamePattern
are mutually exclusive. Only one of these parameters can be passed.
If you specify a string for this parameter, the operation returns only log groups that have names that match the string based on a case-sensitive substring search. For example, if you specify Foo
, log groups named FooBar
, aws/Foo
, and GroupFoo
would match, but foo
, F/o/o
, and Froo
would not match.
logGroupNamePattern
and logGroupNamePrefix
are mutually exclusive. Only one of these parameters can be passed.
The maximum number of items returned. If you don't specify a value, the default is up to 50 items.
" + }, + "includeLinkedAccounts": { + "shape": "IncludeLinkedAccounts", + "documentation": "If you are using a monitoring account, set this to True
to have the operation return log groups in the accounts listed in accountIdentifiers
.
If this parameter is set to true
and accountIdentifiers
contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account.
If you specify includeLinkedAccounts
in your request, then metricFilterCount
, retentionInDays
, and storedBytes
are not included in the response.
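A hedged sketch of the cross-account parameters described above, using the AWS SDK for JavaScript (v2) from a monitoring account; the account IDs and prefix are placeholders, and an SDK build that includes this release's DescribeLogGroups additions is assumed.

```js
// Sketch: from a monitoring account, list log groups in two linked source accounts
// (account IDs and the prefix are placeholders).
const AWS = require('aws-sdk');
const logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

logs.describeLogGroups({
  accountIdentifiers: ['111111111111', '222222222222'], // up to 20 account IDs
  includeLinkedAccounts: true,                           // required for accountIdentifiers to apply
  logGroupNamePrefix: '/my-app/'                         // mutually exclusive with logGroupNamePattern
}).promise()
  .then(data => data.logGroups.forEach(g => console.log(g.logGroupName, g.arn)))
  .catch(console.error);
```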
The log groups.
If the retentionInDays
value is not included for a log group, then that log group is set to have its events never expire.
The log groups.
If the retentionInDays
value is not included for a log group, then that log group's events do not expire.
The name of the log group.
" + "documentation": "The name of the log group.
If you specify values for both logGroupName
and logGroupIdentifier
, the action returns an InvalidParameterException
error.
Specify either the name or ARN of the log group to view. If the log group is in a source account and you are using a monitoring account, you must use the log group ARN.
If you specify values for both logGroupName
and logGroupIdentifier
, the action returns an InvalidParameterException
error.
If the value is LogStreamName
, the results are ordered by log stream name. If the value is LastEventTime
, the results are ordered by the event time. The default value is LogStreamName
.
If you order the results by event time, you cannot specify the logStreamNamePrefix
parameter.
lastEventTimestamp
represents the time of the most recent log event in the log stream in CloudWatch Logs. This number is expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. lastEventTimestamp
updates on an eventual consistency basis. It typically updates in less than an hour from ingestion, but in rare situations might take longer.
If the value is LogStreamName
, the results are ordered by log stream name. If the value is LastEventTime
, the results are ordered by the event time. The default value is LogStreamName
.
If you order the results by event time, you cannot specify the logStreamNamePrefix
parameter.
lastEventTimestamp
represents the time of the most recent log event in the log stream in CloudWatch Logs. This number is expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. lastEventTimestamp
updates on an eventual consistency basis. It typically updates in less than an hour from ingestion, but in rare situations might take longer.
The prefix to match. CloudWatch Logs uses the value you set here only if you also include the logGroupName
parameter in your request.
The prefix to match. CloudWatch Logs uses the value that you set here only if you also include the logGroupName
parameter in your request.
The start time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not exported.
" + "documentation": "The start time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. Events with a timestamp before this time are not exported.
The end time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not exported.
" + "documentation": "The end time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. Events with a timestamp later than this time are not exported.
The creation time of the export task, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The creation time of the export task, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The completion time of the export task, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The completion time of the export task, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
Represents the status of an export task.
" @@ -1931,7 +2069,11 @@ "members": { "logGroupName": { "shape": "LogGroupName", - "documentation": "The name of the log group to search.
" + "documentation": "The name of the log group to search.
If you specify values for both logGroupName
and logGroupIdentifier
, the action returns an InvalidParameterException
error.
Specify either the name or ARN of the log group to view log events from. If the log group is in a source account and you are using a monitoring account, you must use the log group ARN.
If you specify values for both logGroupName
and logGroupIdentifier
, the action returns an InvalidParameterException
error.
The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not returned.
" + "documentation": "The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. Events with a timestamp before this time are not returned.
The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not returned.
" + "documentation": "The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. Events with a timestamp later than this time are not returned.
If the value is true, the operation makes a best effort to provide responses that contain events from multiple log streams within the log group, interleaved in a single response. If the value is false, all the matched log events in the first log stream are searched first, then those in the next log stream, and so on. The default is false.
Important: Starting on June 17, 2019, this parameter is ignored and the value is assumed to be true. The response from this operation always interleaves events from multiple log streams within a log group.
", + "documentation": "If the value is true, the operation attempts to provide responses that contain events from multiple log streams within the log group, interleaved in a single response. If the value is false, all the matched log events in the first log stream are searched first, then those in the next log stream, and so on.
Important As of June 17, 2019, this parameter is ignored and the value is assumed to be true. The response from this operation always interleaves events from multiple log streams within a log group.
", "deprecated": true, "deprecatedMessage": "Starting on June 17, 2019, this parameter will be ignored and the value will be assumed to be true. The response from this operation will always interleave events from multiple log streams within a log group." + }, + "unmask": { + "shape": "Unmask", + "documentation": "Specify true
to display the log event fields with all sensitive data unmasked and visible. The default is false
.
To use this operation with this parameter, you must be signed into an account with the logs:Unmask
permission.
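To show how the new unmask parameter is used, here is a hedged AWS SDK for JavaScript (v2) sketch of FilterLogEvents; the log group, pattern, and time range are placeholders, and the caller is assumed to hold the logs:Unmask permission.

```js
// Sketch: search a log group and return matching events with sensitive data unmasked
// (requires logs:Unmask; names and pattern are placeholders).
const AWS = require('aws-sdk');
const logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

logs.filterLogEvents({
  logGroupName: '/my-app/example',
  filterPattern: 'ERROR',
  startTime: Date.now() - 15 * 60 * 1000, // epoch milliseconds
  endTime: Date.now(),
  unmask: true                            // added in this release; default is false
}).promise()
  .then(data => data.events.forEach(e => console.log(e.timestamp, e.message)))
  .catch(console.error);
```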
IMPORTANT Starting on May 15, 2020, this parameter will be deprecated. This parameter will be an empty list after the deprecation occurs.
Indicates which log streams have been searched and whether each has been searched completely.
" + "documentation": "Important As of May 15, 2020, this parameter is no longer supported. This parameter returns an empty list.
Indicates which log streams have been searched and whether each has been searched completely.
" }, "nextToken": { "shape": "NextToken", @@ -2007,7 +2153,7 @@ }, "timestamp": { "shape": "Timestamp", - "documentation": "The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The time the event was ingested, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The time the event was ingested, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The name or ARN of the log group that contains the data protection policy that you want to see.
" + } + } + }, + "GetDataProtectionPolicyResponse": { + "type": "structure", + "members": { + "logGroupIdentifier": { + "shape": "LogGroupIdentifier", + "documentation": "The log group name or ARN that you specified in your request.
" + }, + "policyDocument": { + "shape": "DataProtectionPolicyDocument", + "documentation": "The data protection policy document for this log group.
" + }, + "lastUpdatedTime": { + "shape": "Timestamp", + "documentation": "The date and time that this policy was most recently updated.
" + } + } + }, "GetLogEventsRequest": { "type": "structure", "required": [ @@ -2042,7 +2217,11 @@ "members": { "logGroupName": { "shape": "LogGroupName", - "documentation": "The name of the log group.
" + "documentation": "The name of the log group.
If you specify values for both logGroupName
and logGroupIdentifier
, the action returns an InvalidParameterException
error.
Specify either the name or ARN of the log group to view events from. If the log group is in a source account and you are using a monitoring account, you must use the log group ARN.
If you specify values for both logGroupName
and logGroupIdentifier
, the action returns an InvalidParameterException
error.
The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp equal to this time or later than this time are included. Events with a timestamp earlier than this time are not included.
" + "documentation": "The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. Events with a timestamp equal to this time or later than this time are included. Events with a timestamp earlier than this time are not included.
The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp equal to or later than this time are not included.
" + "documentation": "The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. Events with a timestamp equal to or later than this time are not included.
The maximum number of log events returned. If you don't specify a value, the maximum is as many log events as can fit in a response size of 1 MB, up to 10,000 log events.
" + "documentation": "The maximum number of log events returned. If you don't specify a limit, the default is as many log events as can fit in a response size of 1 MB (up to 10,000 log events).
" }, "startFromHead": { "shape": "StartFromHead", "documentation": "If the value is true, the earliest log events are returned first. If the value is false, the latest log events are returned first. The default value is false.
If you are using a previous nextForwardToken
value as the nextToken
in this operation, you must specify true
for startFromHead
.
Specify true
to display the log event fields with all sensitive data unmasked and visible. The default is false
.
To use this operation with this parameter, you must be signed into an account with the logs:Unmask
permission.
The token for the next set of items in the backward direction. The token expires after 24 hours. This token is never null. If you have reached the end of the stream, it returns the same token you passed in.
" + "documentation": "The token for the next set of items in the backward direction. The token expires after 24 hours. This token is not null. If you have reached the end of the stream, it returns the same token you passed in.
" } } }, @@ -2095,11 +2278,15 @@ "members": { "logGroupName": { "shape": "LogGroupName", - "documentation": "The name of the log group to search.
" + "documentation": "The name of the log group to search.
If you specify values for both logGroupName
and logGroupIdentifier
, the action returns an InvalidParameterException
error.
The time to set as the center of the query. If you specify time
, the 15 minutes before this time are queried. If you omit time
the 8 minutes before and 8 minutes after this time are searched.
The time
value is specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.
The time to set as the center of the query. If you specify time
, the 15 minutes before this time are queried. If you omit time
, the 8 minutes before and 8 minutes after this time are searched.
The time
value is specified as epoch time, which is the number of seconds since January 1, 1970, 00:00:00 UTC
.
Specify either the name or ARN of the log group to view. If the log group is in a source account and you are using a monitoring account, you must specify the ARN.
If you specify values for both logGroupName
and logGroupIdentifier
, the action returns an InvalidParameterException
error.
The pointer corresponding to the log event record you want to retrieve. You get this from the response of a GetQueryResults
operation. In that response, the value of the @ptr
field for a log event is the value to use as logRecordPointer
to retrieve that complete log event record.
Specify true
to display the log event fields with all sensitive data unmasked and visible. The default is false
.
To use this operation with this parameter, you must be signed into an account with the logs:Unmask
permission.
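As a hedged sketch of how the logRecordPointer and unmask parameters fit together with the AWS SDK for JavaScript (v2): the helper below takes an existing query ID, pulls the @ptr value from the first result row, and fetches the full record. The query ID is assumed to come from an earlier StartQuery call.

```js
// Sketch: fetch one complete log record using the @ptr value from a query result
// (queryId is assumed to exist; unmask requires the logs:Unmask permission).
const AWS = require('aws-sdk');
const logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

async function showRecord(queryId) {
  const { results } = await logs.getQueryResults({ queryId }).promise();
  const ptr = results[0].find(f => f.field === '@ptr').value; // each result row is a list of {field, value}
  const { logRecord } = await logs.getLogRecord({
    logRecordPointer: ptr,
    unmask: true                 // show masked fields; requires logs:Unmask
  }).promise();
  console.log(logRecord);
}

// showRecord('your-query-id');
```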
The status of the most recent running of the query. Possible values are Cancelled
, Complete
, Failed
, Running
, Scheduled
, Timeout
, and Unknown
.
Queries time out after 15 minutes of execution. To avoid having your queries time out, reduce the time range being searched or partition your query into a number of queries.
" + "documentation": "The status of the most recent running of the query. Possible values are Cancelled
, Complete
, Failed
, Running
, Scheduled
, Timeout
, and Unknown
.
Queries time out after 15 minutes of runtime. To avoid having your queries time out, reduce the time range being searched or partition your query into a number of queries.
" } } }, + "IncludeLinkedAccounts": { + "type": "boolean" + }, "InputLogEvent": { "type": "structure", "required": [ @@ -2171,7 +2365,7 @@ "members": { "timestamp": { "shape": "Timestamp", - "documentation": "The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
" + "documentation": "The Amazon Resource Name (ARN) of the KMS key to use when encrypting log data.
" + }, + "dataProtectionStatus": { + "shape": "DataProtectionStatus", + "documentation": "Displays whether this log group has a protection policy, or whether it had one in the past. For more information, see PutDataProtectionPolicy.
" } }, "documentation": "Represents a log group.
" @@ -2305,12 +2503,30 @@ "shape": "LogGroupField" } }, + "LogGroupIdentifier": { + "type": "string", + "max": 2048, + "min": 1, + "pattern": "[\\w#+=/:,.@-]*" + }, + "LogGroupIdentifiers": { + "type": "list", + "member": { + "shape": "LogGroupIdentifier" + } + }, "LogGroupName": { "type": "string", "max": 512, "min": 1, "pattern": "[\\.\\-_/#A-Za-z0-9]+" }, + "LogGroupNamePattern": { + "type": "string", + "max": 512, + "min": 0, + "pattern": "[\\.\\-_/#A-Za-z0-9]*" + }, "LogGroupNames": { "type": "list", "member": { @@ -2344,19 +2560,19 @@ }, "creationTime": { "shape": "Timestamp", - "documentation": "The creation time of the stream, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The creation time of the stream, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The time of the first event, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The time of the first event, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The time of the most recent log event in the log stream in CloudWatch Logs. This number is expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. The lastEventTime
value updates on an eventual consistency basis. It typically updates in less than an hour from ingestion, but in rare situations might take longer.
The time of the most recent log event in the log stream in CloudWatch Logs. This number is expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. The lastEventTime
value updates on an eventual consistency basis. It typically updates in less than an hour from ingestion, but in rare situations might take longer.
The ingestion time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The ingestion time, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The number of bytes stored.
Important: On June 17, 2019, this parameter was deprecated for log streams, and is always reported as zero. This change applies only to log streams. The storedBytes
parameter for log groups is not affected.
The number of bytes stored.
Important: As of June 17, 2019, this parameter is no longer supported for log streams, and is always reported as zero. This change applies only to log streams. The storedBytes
parameter for log groups is not affected.
The creation time of the metric filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The creation time of the metric filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The fields to use as dimensions for the metric. One metric filter can include as many as three dimensions.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
To help prevent accidental high charges, Amazon disables a metric filter if it generates 1000 different name/value pairs for the dimensions that you have specified within a certain amount of time.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
The fields to use as dimensions for the metric. One metric filter can include as many as three dimensions.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs disables a metric filter if it generates 1000 different name/value pairs for your specified dimensions within a certain amount of time. This helps to prevent accidental high charges.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
The value to publish to the CloudWatch metric. For example, if you're counting the occurrences of a term like \"Error\", the value is \"1\" for each occurrence. If you're counting the bytes transferred, the value is the value in the log event.
", + "documentation": "The value to publish to the CloudWatch metric. For example, if you're counting the occurrences of a term like Error
, the value is 1
for each occurrence. If you're counting the bytes transferred, the value is the value in the log event.
The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
The time the event was ingested, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The time the event was ingested, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
Represents a log event.
" @@ -2553,6 +2769,40 @@ "PolicyName": { "type": "string" }, + "PutDataProtectionPolicyRequest": { + "type": "structure", + "required": [ + "logGroupIdentifier", + "policyDocument" + ], + "members": { + "logGroupIdentifier": { + "shape": "LogGroupIdentifier", + "documentation": "Specify either the log group name or log group ARN.
" + }, + "policyDocument": { + "shape": "DataProtectionPolicyDocument", + "documentation": "Specify the data protection policy, in JSON.
This policy must include two JSON blocks:
The first block must include both a DataIdentifer
array and an Operation
property with an Audit
action. The DataIdentifer
array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask.
The Operation
property with an Audit
action is required to find the sensitive data terms. This Audit
action must contain a FindingsDestination
object. You can optionally use that FindingsDestination
object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.
The second block must include both a DataIdentifer
array and an Operation
property with a Deidentify
action. The DataIdentifer
array must exactly match the DataIdentifer
array in the first block of the policy.
The Operation
property with the Deidentify
action is what actually masks the data, and it must contain the \"MaskConfig\": {}
object. The \"MaskConfig\": {}
object must be empty.
For an example data protection policy, see the Examples section on this page.
The contents of the two DataIdentifer
arrays must match exactly.
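The following AWS SDK for JavaScript (v2) sketch builds a policy with the two blocks described above and attaches it to a log group. The exact key spellings, the Version string, and the data-identifier ARN are assumptions to verify against the published policy schema; the log group name is a placeholder.

```js
// Sketch: attach a data protection policy that audits and masks email addresses.
// Key names, version string, and the data-identifier ARN are assumptions for illustration.
const AWS = require('aws-sdk');
const logs = new AWS.CloudWatchLogs({ region: 'us-east-1' });

const policy = {
  Name: 'data-protection-policy',
  Version: '2021-06-01',                      // assumed version string
  Statement: [
    {
      Sid: 'audit-policy',
      DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
      Operation: { Audit: { FindingsDestination: {} } }  // audit destinations are optional
    },
    {
      Sid: 'redact-policy',
      DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
      Operation: { Deidentify: { MaskConfig: {} } }      // MaskConfig must be empty
    }
  ]
};

logs.putDataProtectionPolicy({
  logGroupIdentifier: '/my-app/example',      // log group name or ARN
  policyDocument: JSON.stringify(policy)
}).promise()
  .then(r => console.log('Policy updated at', r.lastUpdatedTime))
  .catch(console.error);
```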
The log group name or ARN that you specified in your request.
" + }, + "policyDocument": { + "shape": "DataProtectionPolicyDocument", + "documentation": "The data protection policy used for this log group.
" + }, + "lastUpdatedTime": { + "shape": "Timestamp", + "documentation": "The date and time that this policy was most recently updated.
" + } + } + }, "PutDestinationPolicyRequest": { "type": "structure", "required": [ @@ -2684,7 +2934,7 @@ "members": { "name": { "shape": "QueryDefinitionName", - "documentation": "A name for the query definition. If you are saving a lot of query definitions, we recommend that you name them so that you can easily find the ones you want by using the first part of the name as a filter in the queryDefinitionNamePrefix
parameter of DescribeQueryDefinitions.
A name for the query definition. If you are saving numerous query definitions, we recommend that you name them. This way, you can find the ones you want by using the first part of the name as a filter in the queryDefinitionNamePrefix
parameter of DescribeQueryDefinitions.
Details of the new policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string. This parameter is required.
The following example creates a resource policy enabling the Route 53 service to put DNS query logs in to the specified log group. Replace \"logArn\"
with the ARN of your CloudWatch Logs resource, such as a log group or log stream.
CloudWatch Logs also supports aws:SourceArn and aws:SourceAccount condition context keys.
In the example resource policy, you would replace the value of SourceArn
with the resource making the call from Route 53 to CloudWatch Logs and replace the value of SourceAccount
with the Amazon Web Services account ID making that call.
{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Sid\": \"Route53LogsToCloudWatchLogs\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": [ \"route53.amazonaws.com\" ] }, \"Action\": \"logs:PutLogEvents\", \"Resource\": \"logArn\", \"Condition\": { \"ArnLike\": { \"aws:SourceArn\": \"myRoute53ResourceArn\" }, \"StringEquals\": { \"aws:SourceAccount\": \"myAwsAccountId\" } } } ] }
Details of the new policy, including the identity of the principal that is enabled to put logs to this account. This is formatted as a JSON string. This parameter is required.
The following example creates a resource policy enabling the Route 53 service to put DNS query logs in to the specified log group. Replace \"logArn\"
with the ARN of your CloudWatch Logs resource, such as a log group or log stream.
CloudWatch Logs also supports aws:SourceArn and aws:SourceAccount condition context keys.
In the example resource policy, you would replace the value of SourceArn
with the resource making the call from Route 53 to CloudWatch Logs. You would also replace the value of SourceAccount
with the Amazon Web Services account ID making that call.
{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Sid\": \"Route53LogsToCloudWatchLogs\", \"Effect\": \"Allow\", \"Principal\": { \"Service\": [ \"route53.amazonaws.com\" ] }, \"Action\": \"logs:PutLogEvents\", \"Resource\": \"logArn\", \"Condition\": { \"ArnLike\": { \"aws:SourceArn\": \"myRoute53ResourceArn\" }, \"StringEquals\": { \"aws:SourceAccount\": \"myAwsAccountId\" } } } ] }
The ARN of the destination to deliver matching log events to. Currently, the supported destinations are:
An Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination (specified using an ARN) belonging to a different account, for cross-account delivery.
If you are setting up a cross-account subscription, the destination must have an IAM policy associated with it that allows the sender to send logs to the destination. For more information, see PutDestinationPolicy.
An Amazon Kinesis Firehose delivery stream belonging to the same account as the subscription filter, for same-account delivery.
A Lambda function belonging to the same account as the subscription filter, for same-account delivery.
The ARN of the destination to deliver matching log events to. Currently, the supported destinations are:
An Amazon Kinesis stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination (specified using an ARN) belonging to a different account, for cross-account delivery.
If you're setting up a cross-account subscription, the destination must have an IAM policy associated with it. The IAM policy must allow the sender to send logs to the destination. For more information, see PutDestinationPolicy.
A Kinesis Data Firehose delivery stream belonging to the same account as the subscription filter, for same-account delivery.
A Lambda function belonging to the same account as the subscription filter, for same-account delivery.
The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to random for a more even distribution. This property is only applicable when the destination is an Amazon Kinesis stream.
" + "documentation": "The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to random for a more even distribution. This property is only applicable when the destination is an Amazon Kinesis data stream.
" } } }, @@ -2917,7 +3167,7 @@ }, "tooOldLogEventEndIndex": { "shape": "LogEventIndex", - "documentation": "The log events that are too old.
" + "documentation": "The log events that are dated too far in the past.
" }, "expiredLogEventEndIndex": { "shape": "LogEventIndex", @@ -2945,7 +3195,7 @@ }, "lastUpdatedTime": { "shape": "Timestamp", - "documentation": "Timestamp showing when this policy was last updated, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "Timestamp showing when this policy was last updated, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
A policy enabling one or more entities to put logs to a log group in this account.
" @@ -3043,19 +3293,23 @@ "members": { "logGroupName": { "shape": "LogGroupName", - "documentation": "The log group on which to perform the query.
A StartQuery
operation must include a logGroupNames
or a logGroupName
parameter, but not both.
The log group on which to perform the query.
A StartQuery
operation must include exactly one of the following parameters: logGroupName
, logGroupNames
or logGroupIdentifiers
.
The list of log groups to be queried. You can include up to 20 log groups.
A StartQuery
operation must include a logGroupNames
or a logGroupName
parameter, but not both.
The list of log groups to be queried. You can include up to 50 log groups.
A StartQuery
operation must include exactly one of the following parameters: logGroupName
, logGroupNames
or logGroupIdentifiers
.
The list of log groups to query. You can include up to 50 log groups.
You can specify them by the log group name or ARN. If a log group that you're querying is in a source account and you're using a monitoring account, you must specify the ARN of the log group here. The query definition must also be defined in the monitoring account.
If you specify an ARN, the ARN can't end with an asterisk (*).
A StartQuery
operation must include exactly one of the following parameters: logGroupName
, logGroupNames
or logGroupIdentifiers
.
The beginning of the time range to query. The range is inclusive, so the specified start time is included in the query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.
" + "documentation": "The beginning of the time range to query. The range is inclusive, so the specified start time is included in the query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC
.
The end of the time range to query. The range is inclusive, so the specified end time is included in the query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.
" + "documentation": "The end of the time range to query. The range is inclusive, so the specified end time is included in the query. Specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC
.
The creation time of the subscription filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
" + "documentation": "The creation time of the subscription filter, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
.
Represents a subscription filter.
" @@ -3262,6 +3516,9 @@ "Token": { "type": "string" }, + "Unmask": { + "type": "boolean" + }, "UntagLogGroupRequest": { "type": "structure", "required": [ @@ -3302,5 +3559,5 @@ "type": "string" } }, - "documentation": "You can use Amazon CloudWatch Logs to monitor, store, and access your log files from EC2 instances, CloudTrail, and other sources. You can then retrieve the associated log data from CloudWatch Logs using the CloudWatch console, CloudWatch Logs commands in the Amazon Web Services CLI, CloudWatch Logs API, or CloudWatch Logs SDK.
You can use CloudWatch Logs to:
Monitor logs from EC2 instances in real-time: You can use CloudWatch Logs to monitor applications and systems using log data. For example, CloudWatch Logs can track the number of errors that occur in your application logs and send you a notification whenever the rate of errors exceeds a threshold that you specify. CloudWatch Logs uses your log data for monitoring so no code changes are required. For example, you can monitor application logs for specific literal terms (such as \"NullReferenceException\") or count the number of occurrences of a literal term at a particular position in log data (such as \"404\" status codes in an Apache access log). When the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch metric that you specify.
Monitor CloudTrail logged events: You can create alarms in CloudWatch and receive notifications of particular API activity as captured by CloudTrail. You can use the notification to perform troubleshooting.
Archive log data: You can use CloudWatch Logs to store your log data in highly durable storage. You can change the log retention setting so that any log events older than this setting are automatically deleted. The CloudWatch Logs agent makes it easy to quickly send both rotated and non-rotated log data off of a host and into the log service. You can then access the raw log data when you need it.
You can use Amazon CloudWatch Logs to monitor, store, and access your log files from EC2 instances, CloudTrail, and other sources. You can then retrieve the associated log data from CloudWatch Logs using the CloudWatch console. Alternatively, you can use CloudWatch Logs commands in the Amazon Web Services CLI, CloudWatch Logs API, or CloudWatch Logs SDK.
You can use CloudWatch Logs to:
Monitor logs from EC2 instances in real time: You can use CloudWatch Logs to monitor applications and systems using log data. For example, CloudWatch Logs can track the number of errors that occur in your application logs. Then, it can send you a notification whenever the rate of errors exceeds a threshold that you specify. CloudWatch Logs uses your log data for monitoring so no code changes are required. For example, you can monitor application logs for specific literal terms (such as \"NullReferenceException\"). You can also count the number of occurrences of a literal term at a particular position in log data (such as \"404\" status codes in an Apache access log). When the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch metric that you specify.
Monitor CloudTrail logged events: You can create alarms in CloudWatch and receive notifications of particular API activity as captured by CloudTrail. You can use the notification to perform troubleshooting.
Archive log data: You can use CloudWatch Logs to store your log data in highly durable storage. You can change the log retention setting so that any log events earlier than this setting are automatically deleted. The CloudWatch Logs agent helps to quickly send both rotated and non-rotated log data off of a host and into the log service. You can then access the raw log data when you need it.
Archive application.
" + }, + "ArchiveWave": { + "name": "ArchiveWave", + "http": { + "method": "POST", + "requestUri": "/ArchiveWave", + "responseCode": 200 + }, + "input": { + "shape": "ArchiveWaveRequest" + }, + "output": { + "shape": "Wave" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Archive wave.
" + }, + "AssociateApplications": { + "name": "AssociateApplications", + "http": { + "method": "POST", + "requestUri": "/AssociateApplications", + "responseCode": 200 + }, + "input": { + "shape": "AssociateApplicationsRequest" + }, + "output": { + "shape": "AssociateApplicationsResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Associate applications to wave.
", + "idempotent": true + }, + "AssociateSourceServers": { + "name": "AssociateSourceServers", + "http": { + "method": "POST", + "requestUri": "/AssociateSourceServers", + "responseCode": 200 + }, + "input": { + "shape": "AssociateSourceServersRequest" + }, + "output": { + "shape": "AssociateSourceServersResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Associate source servers to application.
", + "idempotent": true + }, "ChangeServerLifeCycleState": { "name": "ChangeServerLifeCycleState", "http": { @@ -42,6 +160,33 @@ ], "documentation": "Allows the user to set the SourceServer.LifeCycle.state property for specific Source Server IDs to one of the following: READY_FOR_TEST or READY_FOR_CUTOVER. This command only works if the Source Server is already launchable (dataReplicationInfo.lagDuration is not null.)
" }, + "CreateApplication": { + "name": "CreateApplication", + "http": { + "method": "POST", + "requestUri": "/CreateApplication", + "responseCode": 201 + }, + "input": { + "shape": "CreateApplicationRequest" + }, + "output": { + "shape": "Application" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Create application.
", + "idempotent": true + }, "CreateLaunchConfigurationTemplate": { "name": "CreateLaunchConfigurationTemplate", "http": { @@ -66,7 +211,7 @@ "shape": "AccessDeniedException" } ], - "documentation": "Creates a new ReplicationConfigurationTemplate.
" + "documentation": "Creates a new Launch Configuration Template.
" }, "CreateReplicationConfigurationTemplate": { "name": "CreateReplicationConfigurationTemplate", @@ -94,6 +239,60 @@ ], "documentation": "Creates a new ReplicationConfigurationTemplate.
" }, + "CreateWave": { + "name": "CreateWave", + "http": { + "method": "POST", + "requestUri": "/CreateWave", + "responseCode": 201 + }, + "input": { + "shape": "CreateWaveRequest" + }, + "output": { + "shape": "Wave" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Create wave.
", + "idempotent": true + }, + "DeleteApplication": { + "name": "DeleteApplication", + "http": { + "method": "POST", + "requestUri": "/DeleteApplication", + "responseCode": 204 + }, + "input": { + "shape": "DeleteApplicationRequest" + }, + "output": { + "shape": "DeleteApplicationResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Delete application.
", + "idempotent": true + }, "DeleteJob": { "name": "DeleteJob", "http": { @@ -145,7 +344,7 @@ "shape": "ConflictException" } ], - "documentation": "Creates a new ReplicationConfigurationTemplate.
", + "documentation": "Deletes a single Launch Configuration Template by ID.
", "idempotent": true }, "DeleteReplicationConfigurationTemplate": { @@ -226,6 +425,33 @@ "documentation": "Deletes a given vCenter client by ID.
", "idempotent": true }, + "DeleteWave": { + "name": "DeleteWave", + "http": { + "method": "POST", + "requestUri": "/DeleteWave", + "responseCode": 204 + }, + "input": { + "shape": "DeleteWaveRequest" + }, + "output": { + "shape": "DeleteWaveResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Delete wave.
", + "idempotent": true + }, "DescribeJobLogItems": { "name": "DescribeJobLogItems", "http": { @@ -296,7 +522,7 @@ "shape": "ValidationException" } ], - "documentation": "Creates a new ReplicationConfigurationTemplate.
" + "documentation": "Lists all Launch Configuration Templates, filtered by Launch Configuration Template IDs
" }, "DescribeReplicationConfigurationTemplates": { "name": "DescribeReplicationConfigurationTemplates", @@ -373,6 +599,60 @@ ], "documentation": "Returns a list of the installed vCenter clients.
" }, + "DisassociateApplications": { + "name": "DisassociateApplications", + "http": { + "method": "POST", + "requestUri": "/DisassociateApplications", + "responseCode": 200 + }, + "input": { + "shape": "DisassociateApplicationsRequest" + }, + "output": { + "shape": "DisassociateApplicationsResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Disassociate applications from wave.
", + "idempotent": true + }, + "DisassociateSourceServers": { + "name": "DisassociateSourceServers", + "http": { + "method": "POST", + "requestUri": "/DisassociateSourceServers", + "responseCode": 200 + }, + "input": { + "shape": "DisassociateSourceServersRequest" + }, + "output": { + "shape": "DisassociateSourceServersResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Disassociate source servers from application.
", + "idempotent": true + }, "DisconnectFromService": { "name": "DisconnectFromService", "http": { @@ -497,6 +777,49 @@ ], "documentation": "Initialize Application Migration Service.
" }, + "ListApplications": { + "name": "ListApplications", + "http": { + "method": "POST", + "requestUri": "/ListApplications", + "responseCode": 200 + }, + "input": { + "shape": "ListApplicationsRequest" + }, + "output": { + "shape": "ListApplicationsResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + } + ], + "documentation": "Retrieves all applications or multiple applications by ID.
" + }, + "ListSourceServerActions": { + "name": "ListSourceServerActions", + "http": { + "method": "POST", + "requestUri": "/ListSourceServerActions", + "responseCode": 200 + }, + "input": { + "shape": "ListSourceServerActionsRequest" + }, + "output": { + "shape": "ListSourceServerActionsResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "List source server post migration custom actions.
" + }, "ListTagsForResource": { "name": "ListTagsForResource", "http": { @@ -529,6 +852,49 @@ ], "documentation": "List all tags for your Application Migration Service resources.
" }, + "ListTemplateActions": { + "name": "ListTemplateActions", + "http": { + "method": "POST", + "requestUri": "/ListTemplateActions", + "responseCode": 200 + }, + "input": { + "shape": "ListTemplateActionsRequest" + }, + "output": { + "shape": "ListTemplateActionsResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "List template post migration custom actions.
" + }, + "ListWaves": { + "name": "ListWaves", + "http": { + "method": "POST", + "requestUri": "/ListWaves", + "responseCode": 200 + }, + "input": { + "shape": "ListWavesRequest" + }, + "output": { + "shape": "ListWavesResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + } + ], + "documentation": "Retrieves all waves or multiple waves by ID.
" + }, "MarkAsArchived": { "name": "MarkAsArchived", "http": { @@ -555,6 +921,116 @@ ], "documentation": "Archives specific Source Servers by setting the SourceServer.isArchived property to true for specified SourceServers by ID. This command only works for SourceServers with a lifecycle. state which equals DISCONNECTED or CUTOVER.
" }, + "PutSourceServerAction": { + "name": "PutSourceServerAction", + "http": { + "method": "POST", + "requestUri": "/PutSourceServerAction", + "responseCode": 200 + }, + "input": { + "shape": "PutSourceServerActionRequest" + }, + "output": { + "shape": "SourceServerActionDocument" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Put source server post migration custom action.
" + }, + "PutTemplateAction": { + "name": "PutTemplateAction", + "http": { + "method": "POST", + "requestUri": "/PutTemplateAction", + "responseCode": 200 + }, + "input": { + "shape": "PutTemplateActionRequest" + }, + "output": { + "shape": "TemplateActionDocument" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Put template post migration custom action.
" + }, + "RemoveSourceServerAction": { + "name": "RemoveSourceServerAction", + "http": { + "method": "POST", + "requestUri": "/RemoveSourceServerAction", + "responseCode": 204 + }, + "input": { + "shape": "RemoveSourceServerActionRequest" + }, + "output": { + "shape": "RemoveSourceServerActionResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ValidationException" + } + ], + "documentation": "Remove source server post migration custom action.
" + }, + "RemoveTemplateAction": { + "name": "RemoveTemplateAction", + "http": { + "method": "POST", + "requestUri": "/RemoveTemplateAction", + "responseCode": 204 + }, + "input": { + "shape": "RemoveTemplateActionRequest" + }, + "output": { + "shape": "RemoveTemplateActionResponse" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ValidationException" + } + ], + "documentation": "Remove template post migration custom action.
" + }, "RetryDataReplication": { "name": "RetryDataReplication", "http": { @@ -721,6 +1197,58 @@ ], "documentation": "Starts a job that terminates specific launched EC2 Test and Cutover instances. This command will not work for any Source Server with a lifecycle.state of TESTING, CUTTING_OVER, or CUTOVER.
" }, + "UnarchiveApplication": { + "name": "UnarchiveApplication", + "http": { + "method": "POST", + "requestUri": "/UnarchiveApplication", + "responseCode": 200 + }, + "input": { + "shape": "UnarchiveApplicationRequest" + }, + "output": { + "shape": "Application" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ServiceQuotaExceededException" + } + ], + "documentation": "Unarchive application.
" + }, + "UnarchiveWave": { + "name": "UnarchiveWave", + "http": { + "method": "POST", + "requestUri": "/UnarchiveWave", + "responseCode": 200 + }, + "input": { + "shape": "UnarchiveWaveRequest" + }, + "output": { + "shape": "Wave" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ServiceQuotaExceededException" + } + ], + "documentation": "Unarchive wave.
" + }, "UntagResource": { "name": "UntagResource", "http": { @@ -751,6 +1279,33 @@ "documentation": "Deletes the specified set of tags from the specified set of Application Migration Service resources.
", "idempotent": true }, + "UpdateApplication": { + "name": "UpdateApplication", + "http": { + "method": "POST", + "requestUri": "/UpdateApplication", + "responseCode": 200 + }, + "input": { + "shape": "UpdateApplicationRequest" + }, + "output": { + "shape": "Application" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Update application.
", + "idempotent": true + }, "UpdateLaunchConfiguration": { "name": "UpdateLaunchConfiguration", "http": { @@ -808,7 +1363,7 @@ "shape": "AccessDeniedException" } ], - "documentation": "Creates a new ReplicationConfigurationTemplate.
" + "documentation": "Updates an existing Launch Configuration Template by ID.
" }, "UpdateReplicationConfiguration": { "name": "UpdateReplicationConfiguration", @@ -876,14 +1431,43 @@ "name": "UpdateSourceServerReplicationType", "http": { "method": "POST", - "requestUri": "/UpdateSourceServerReplicationType", + "requestUri": "/UpdateSourceServerReplicationType", + "responseCode": 200 + }, + "input": { + "shape": "UpdateSourceServerReplicationTypeRequest" + }, + "output": { + "shape": "SourceServer" + }, + "errors": [ + { + "shape": "UninitializedAccountException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "ConflictException" + } + ], + "documentation": "Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type.
" + }, + "UpdateWave": { + "name": "UpdateWave", + "http": { + "method": "POST", + "requestUri": "/UpdateWave", "responseCode": 200 }, "input": { - "shape": "UpdateSourceServerReplicationTypeRequest" + "shape": "UpdateWaveRequest" }, "output": { - "shape": "SourceServer" + "shape": "Wave" }, "errors": [ { @@ -892,14 +1476,12 @@ { "shape": "ResourceNotFoundException" }, - { - "shape": "ValidationException" - }, { "shape": "ConflictException" } ], - "documentation": "Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type.
" + "documentation": "Update wave.
", + "idempotent": true } }, "shapes": { @@ -908,6 +1490,223 @@ "max": 2048, "min": 20 }, + "ActionID": { + "type": "string", + "max": 64, + "min": 1, + "pattern": "[0-9a-zA-Z]$" + }, + "ActionIDs": { + "type": "list", + "member": { + "shape": "ActionID" + }, + "max": 100, + "min": 0 + }, + "ActionName": { + "type": "string", + "max": 256, + "min": 1, + "pattern": "^[^\\s\\x00]( *[^\\s\\x00])*$" + }, + "Application": { + "type": "structure", + "members": { + "applicationAggregatedStatus": { + "shape": "ApplicationAggregatedStatus", + "documentation": "Application aggregated status.
" + }, + "applicationID": { + "shape": "ApplicationID", + "documentation": "Application ID.
" + }, + "arn": { + "shape": "ARN", + "documentation": "Application ARN.
" + }, + "creationDateTime": { + "shape": "ISO8601DatetimeString", + "documentation": "Application creation dateTime.
" + }, + "description": { + "shape": "ApplicationDescription", + "documentation": "Application description.
" + }, + "isArchived": { + "shape": "Boolean", + "documentation": "Application archival status.
" + }, + "lastModifiedDateTime": { + "shape": "ISO8601DatetimeString", + "documentation": "Application last modified dateTime.
" + }, + "name": { + "shape": "ApplicationName", + "documentation": "Application name.
" + }, + "tags": { + "shape": "TagsMap", + "documentation": "Application tags.
" + }, + "waveID": { + "shape": "WaveID", + "documentation": "Application wave ID.
" + } + } + }, + "ApplicationAggregatedStatus": { + "type": "structure", + "members": { + "healthStatus": { + "shape": "ApplicationHealthStatus", + "documentation": "Application aggregated status health status.
" + }, + "lastUpdateDateTime": { + "shape": "ISO8601DatetimeString", + "documentation": "Application aggregated status last update dateTime.
" + }, + "progressStatus": { + "shape": "ApplicationProgressStatus", + "documentation": "Application aggregated status progress status.
" + }, + "totalSourceServers": { + "shape": "PositiveInteger", + "documentation": "Application aggregated status total source servers amount.
" + } + }, + "documentation": "Application aggregated status.
" + }, + "ApplicationDescription": { + "type": "string", + "max": 600, + "min": 0, + "pattern": "^[^\\x00]*$" + }, + "ApplicationHealthStatus": { + "type": "string", + "enum": [ + "HEALTHY", + "LAGGING", + "ERROR" + ] + }, + "ApplicationID": { + "type": "string", + "max": 21, + "min": 21, + "pattern": "^app-[0-9a-zA-Z]{17}$" + }, + "ApplicationIDs": { + "type": "list", + "member": { + "shape": "ApplicationID" + }, + "max": 50, + "min": 1 + }, + "ApplicationIDsFilter": { + "type": "list", + "member": { + "shape": "ApplicationID" + }, + "max": 200, + "min": 0 + }, + "ApplicationName": { + "type": "string", + "max": 256, + "min": 1, + "pattern": "^[^\\s\\x00]( *[^\\s\\x00])*$" + }, + "ApplicationProgressStatus": { + "type": "string", + "enum": [ + "NOT_STARTED", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "ApplicationsList": { + "type": "list", + "member": { + "shape": "Application" + } + }, + "ArchiveApplicationRequest": { + "type": "structure", + "required": [ + "applicationID" + ], + "members": { + "applicationID": { + "shape": "ApplicationID", + "documentation": "Application ID.
" + } + } + }, + "ArchiveWaveRequest": { + "type": "structure", + "required": [ + "waveID" + ], + "members": { + "waveID": { + "shape": "WaveID", + "documentation": "Wave ID.
" + } + } + }, + "AssociateApplicationsRequest": { + "type": "structure", + "required": [ + "applicationIDs", + "waveID" + ], + "members": { + "applicationIDs": { + "shape": "ApplicationIDs", + "documentation": "Application IDs list.
" + }, + "waveID": { + "shape": "WaveID", + "documentation": "Wave ID.
" + } + } + }, + "AssociateApplicationsResponse": { + "type": "structure", + "members": {} + }, + "AssociateSourceServersRequest": { + "type": "structure", + "required": [ + "applicationID", + "sourceServerIDs" + ], + "members": { + "applicationID": { + "shape": "ApplicationID", + "documentation": "Application ID.
" + }, + "sourceServerIDs": { + "shape": "AssociateSourceServersRequestSourceServerIDs", + "documentation": "Source server IDs list.
" + } + } + }, + "AssociateSourceServersRequestSourceServerIDs": { + "type": "list", + "member": { + "shape": "SourceServerID" + }, + "max": 50, + "min": 1 + }, + "AssociateSourceServersResponse": { + "type": "structure", + "members": {} + }, "Boolean": { "type": "boolean", "box": true @@ -990,16 +1789,83 @@ "max": 256, "min": 0 }, + "CreateApplicationRequest": { + "type": "structure", + "required": [ + "name" + ], + "members": { + "description": { + "shape": "ApplicationDescription", + "documentation": "Application description.
" + }, + "name": { + "shape": "ApplicationName", + "documentation": "Application name.
" + }, + "tags": { + "shape": "TagsMap", + "documentation": "Application tags.
" + } + } + }, "CreateLaunchConfigurationTemplateRequest": { "type": "structure", "members": { + "associatePublicIpAddress": { + "shape": "Boolean", + "documentation": "Associate public Ip address.
" + }, + "bootMode": { + "shape": "BootMode", + "documentation": "Launch configuration template boot mode.
" + }, + "copyPrivateIp": { + "shape": "Boolean", + "documentation": "Copy private Ip.
" + }, + "copyTags": { + "shape": "Boolean", + "documentation": "Copy tags.
" + }, + "enableMapAutoTagging": { + "shape": "Boolean", + "documentation": "Enable map auto tagging.
" + }, + "largeVolumeConf": { + "shape": "LaunchTemplateDiskConf", + "documentation": "Large volume config.
" + }, + "launchDisposition": { + "shape": "LaunchDisposition", + "documentation": "Launch disposition.
" + }, + "licensing": { + "shape": "Licensing" + }, + "mapAutoTaggingMpeID": { + "shape": "TagValue", + "documentation": "Launch configuration template map auto tagging MPE ID.
" + }, "postLaunchActions": { "shape": "PostLaunchActions", - "documentation": "Request to associate the default Application Migration Service Security group with the Replication Settings template.
" + "documentation": "Launch configuration template post launch actions.
" + }, + "smallVolumeConf": { + "shape": "LaunchTemplateDiskConf", + "documentation": "Small volume config.
" + }, + "smallVolumeMaxSize": { + "shape": "PositiveInteger", + "documentation": "Small volume maximum size.
" }, "tags": { "shape": "TagsMap", - "documentation": "Request to associate the default Application Migration Service Security group with the Replication Settings template.
" + "documentation": "Request to associate tags during creation of a Launch Configuration Template.
" + }, + "targetInstanceTypeRightSizingMethod": { + "shape": "TargetInstanceTypeRightSizingMethod", + "documentation": "Target instance type right-sizing method.
" } } }, @@ -1073,6 +1939,26 @@ } } }, + "CreateWaveRequest": { + "type": "structure", + "required": [ + "name" + ], + "members": { + "description": { + "shape": "WaveDescription", + "documentation": "Wave description.
" + }, + "name": { + "shape": "WaveName", + "documentation": "Wave name.
" + }, + "tags": { + "shape": "TagsMap", + "documentation": "Wave tags.
" + } + } + }, "DataReplicationError": { "type": "structure", "members": { @@ -1257,6 +2143,22 @@ "SHIPPING_SNAPSHOT" ] }, + "DeleteApplicationRequest": { + "type": "structure", + "required": [ + "applicationID" + ], + "members": { + "applicationID": { + "shape": "ApplicationID", + "documentation": "Application ID.
" + } + } + }, + "DeleteApplicationResponse": { + "type": "structure", + "members": {} + }, "DeleteJobRequest": { "type": "structure", "required": [ @@ -1333,6 +2235,22 @@ } } }, + "DeleteWaveRequest": { + "type": "structure", + "required": [ + "waveID" + ], + "members": { + "waveID": { + "shape": "WaveID", + "documentation": "Wave ID.
" + } + } + }, + "DeleteWaveResponse": { + "type": "structure", + "members": {} + }, "DescribeJobLogItemsRequest": { "type": "structure", "required": [ @@ -1344,7 +2262,7 @@ "documentation": "Request to describe Job log job ID.
" }, "maxResults": { - "shape": "StrictlyPositiveInteger", + "shape": "MaxResultsType", "documentation": "Request to describe Job log item maximum results.
" }, "nextToken": { @@ -1374,7 +2292,7 @@ "documentation": "Request to describe Job log filters.
" }, "maxResults": { - "shape": "StrictlyPositiveInteger", + "shape": "MaxResultsType", "documentation": "Request to describe job log items by max results.
" }, "nextToken": { @@ -1427,15 +2345,15 @@ "members": { "launchConfigurationTemplateIDs": { "shape": "LaunchConfigurationTemplateIDs", - "documentation": "Request to disconnect Source Server from service by Server ID.
" + "documentation": "Request to filter Launch Configuration Templates list by Launch Configuration Template ID.
" }, "maxResults": { - "shape": "StrictlyPositiveInteger", - "documentation": "Request to disconnect Source Server from service by Server ID.
" + "shape": "MaxResultsType", + "documentation": "Maximum results to be returned in DescribeLaunchConfigurationTemplates.
" }, "nextToken": { "shape": "PaginationToken", - "documentation": "Request to disconnect Source Server from service by Server ID.
" + "documentation": "Next pagination token returned from DescribeLaunchConfigurationTemplates.
" } } }, @@ -1444,11 +2362,11 @@ "members": { "items": { "shape": "LaunchConfigurationTemplates", - "documentation": "Request to disconnect Source Server from service by Server ID.
" + "documentation": "List of items returned by DescribeLaunchConfigurationTemplates.
" }, "nextToken": { "shape": "PaginationToken", - "documentation": "Request to disconnect Source Server from service by Server ID.
" + "documentation": "Next pagination token returned from DescribeLaunchConfigurationTemplates.
" } } }, @@ -1456,7 +2374,7 @@ "type": "structure", "members": { "maxResults": { - "shape": "StrictlyPositiveInteger", + "shape": "MaxResultsType", "documentation": "Request to describe Replication Configuration template by max results.
" }, "nextToken": { @@ -1490,7 +2408,7 @@ "documentation": "Request to filter Source Servers list.
" }, "maxResults": { - "shape": "StrictlyPositiveInteger", + "shape": "MaxResultsType", "documentation": "Request to filter Source Servers list by maximum results.
" }, "nextToken": { @@ -1499,9 +2417,21 @@ } } }, + "DescribeSourceServersRequestApplicationIDs": { + "type": "list", + "member": { + "shape": "ApplicationID" + }, + "max": 200, + "min": 0 + }, "DescribeSourceServersRequestFilters": { "type": "structure", "members": { + "applicationIDs": { + "shape": "DescribeSourceServersRequestApplicationIDs", + "documentation": "Request to filter Source Servers list by application IDs.
" + }, "isArchived": { "shape": "Boolean", "documentation": "Request to filter Source Servers list by archived.
" @@ -1546,7 +2476,7 @@ "type": "structure", "members": { "maxResults": { - "shape": "StrictlyPositiveInteger", + "shape": "MaxResultsType", "documentation": "Maximum results to be returned in DescribeVcenterClients.
", "location": "querystring", "locationName": "maxResults" @@ -1572,6 +2502,56 @@ } } }, + "DisassociateApplicationsRequest": { + "type": "structure", + "required": [ + "applicationIDs", + "waveID" + ], + "members": { + "applicationIDs": { + "shape": "ApplicationIDs", + "documentation": "Application IDs list.
" + }, + "waveID": { + "shape": "WaveID", + "documentation": "Wave ID.
" + } + } + }, + "DisassociateApplicationsResponse": { + "type": "structure", + "members": {} + }, + "DisassociateSourceServersRequest": { + "type": "structure", + "required": [ + "applicationID", + "sourceServerIDs" + ], + "members": { + "applicationID": { + "shape": "ApplicationID", + "documentation": "Application ID.
" + }, + "sourceServerIDs": { + "shape": "DisassociateSourceServersRequestSourceServerIDs", + "documentation": "Source server IDs list.
" + } + } + }, + "DisassociateSourceServersRequestSourceServerIDs": { + "type": "list", + "member": { + "shape": "SourceServerID" + }, + "max": 50, + "min": 1 + }, + "DisassociateSourceServersResponse": { + "type": "structure", + "members": {} + }, "DisconnectFromServiceRequest": { "type": "structure", "required": [ @@ -1606,6 +2586,10 @@ "max": 1000, "min": 0 }, + "DocumentVersion": { + "type": "string", + "pattern": "^(\\$DEFAULT|\\$LATEST|[0-9]+)$" + }, "EC2InstanceID": { "type": "string", "max": 255, @@ -1617,6 +2601,12 @@ "max": 255, "min": 0 }, + "EC2LaunchConfigurationTemplateID": { + "type": "string", + "max": 20, + "min": 20, + "pattern": "^lt-[0-9a-z]{17}$" + }, "FinalizeCutoverRequest": { "type": "structure", "required": [ @@ -1845,26 +2835,26 @@ "members": { "executionID": { "shape": "BoundedString", - "documentation": "Job type.
" + "documentation": "AWS Systems Manager Document's execution ID of the of the Job Post Launch Actions.
" }, "executionStatus": { "shape": "PostLaunchActionExecutionStatus", - "documentation": "Job type.
" + "documentation": "AWS Systems Manager Document's execution status.
" }, "failureReason": { "shape": "BoundedString", - "documentation": "Job type.
" + "documentation": "AWS Systems Manager Document's failure reason.
" }, "ssmDocument": { "shape": "SsmDocument", - "documentation": "Job type.
" + "documentation": "AWS Systems Manager's Document of the of the Job Post Launch Actions.
" }, "ssmDocumentType": { "shape": "SsmDocumentType", - "documentation": "Job type.
" + "documentation": "AWS Systems Manager Document type.
" } }, - "documentation": "Job type.
" + "documentation": "Launch Status of the Job Post Launch Actions.
" }, "JobStatus": { "type": "string", @@ -1911,6 +2901,10 @@ "shape": "BoundedString", "documentation": "Launch configuration EC2 Launch template ID.
" }, + "enableMapAutoTagging": { + "shape": "Boolean", + "documentation": "Enable map auto tagging.
" + }, "launchDisposition": { "shape": "LaunchDisposition", "documentation": "Launch disposition for launch configuration.
" @@ -1919,6 +2913,10 @@ "shape": "Licensing", "documentation": "Launch configuration OS licensing.
" }, + "mapAutoTaggingMpeID": { + "shape": "TagValue", + "documentation": "Map auto tagging MPE ID.
" + }, "name": { "shape": "SmallBoundedString", "documentation": "Launch configuration name.
" @@ -1944,19 +2942,70 @@ "members": { "arn": { "shape": "ARN", - "documentation": "Copy Private IP during Launch Configuration.
" + "documentation": "ARN of the Launch Configuration Template.
" + }, + "associatePublicIpAddress": { + "shape": "Boolean", + "documentation": "Associate public Ip address.
" + }, + "bootMode": { + "shape": "BootMode", + "documentation": "Launch configuration template boot mode.
" + }, + "copyPrivateIp": { + "shape": "Boolean", + "documentation": "Copy private Ip.
" + }, + "copyTags": { + "shape": "Boolean", + "documentation": "Copy tags.
" + }, + "ec2LaunchTemplateID": { + "shape": "EC2LaunchConfigurationTemplateID", + "documentation": "EC2 launch template ID.
" + }, + "enableMapAutoTagging": { + "shape": "Boolean", + "documentation": "Enable map auto tagging.
" + }, + "largeVolumeConf": { + "shape": "LaunchTemplateDiskConf", + "documentation": "Large volume config.
" }, "launchConfigurationTemplateID": { "shape": "LaunchConfigurationTemplateID", - "documentation": "Copy Private IP during Launch Configuration.
" + "documentation": "ID of the Launch Configuration Template.
" + }, + "launchDisposition": { + "shape": "LaunchDisposition", + "documentation": "Launch disposition.
" + }, + "licensing": { + "shape": "Licensing" + }, + "mapAutoTaggingMpeID": { + "shape": "TagValue", + "documentation": "Launch configuration template map auto tagging MPE ID.
" }, "postLaunchActions": { "shape": "PostLaunchActions", - "documentation": "Copy Private IP during Launch Configuration.
" + "documentation": "Post Launch Actions of the Launch Configuration Template.
" + }, + "smallVolumeConf": { + "shape": "LaunchTemplateDiskConf", + "documentation": "Small volume config.
" + }, + "smallVolumeMaxSize": { + "shape": "PositiveInteger", + "documentation": "Small volume maximum size.
" }, "tags": { "shape": "TagsMap", - "documentation": "Copy Private IP during Launch Configuration.
" + "documentation": "Tags of the Launch Configuration Template.
" + }, + "targetInstanceTypeRightSizingMethod": { + "shape": "TargetInstanceTypeRightSizingMethod", + "documentation": "Target instance type right-sizing method.
" } } }, @@ -1999,6 +3048,24 @@ "TERMINATED" ] }, + "LaunchTemplateDiskConf": { + "type": "structure", + "members": { + "iops": { + "shape": "PositiveInteger", + "documentation": "Launch template disk iops configuration.
" + }, + "throughput": { + "shape": "PositiveInteger", + "documentation": "Launch template disk throughput configuration.
" + }, + "volumeType": { + "shape": "VolumeType", + "documentation": "Launch template disk volume type configuration.
" + } + }, + "documentation": "Launch template disk configuration.
" + }, "LaunchedInstance": { "type": "structure", "members": { @@ -2187,6 +3254,91 @@ "max": 10, "min": 0 }, + "ListApplicationsRequest": { + "type": "structure", + "members": { + "filters": { + "shape": "ListApplicationsRequestFilters", + "documentation": "Applications list filters.
" + }, + "maxResults": { + "shape": "MaxResultsType", + "documentation": "Maximum results to return when listing applications.
" + }, + "nextToken": { + "shape": "PaginationToken", + "documentation": "Request next token.
" + } + } + }, + "ListApplicationsRequestFilters": { + "type": "structure", + "members": { + "applicationIDs": { + "shape": "ApplicationIDsFilter", + "documentation": "Filter applications list by application ID.
" + }, + "isArchived": { + "shape": "Boolean", + "documentation": "Filter applications list by archival status.
" + }, + "waveIDs": { + "shape": "WaveIDsFilter", + "documentation": "Filter applications list by wave ID.
" + } + }, + "documentation": "Applications list filters.
" + }, + "ListApplicationsResponse": { + "type": "structure", + "members": { + "items": { + "shape": "ApplicationsList", + "documentation": "Applications list.
" + }, + "nextToken": { + "shape": "PaginationToken", + "documentation": "Response next token.
" + } + } + }, + "ListSourceServerActionsRequest": { + "type": "structure", + "required": [ + "sourceServerID" + ], + "members": { + "filters": { + "shape": "SourceServerActionsRequestFilters", + "documentation": "Filters to apply when listing source server post migration custom actions.
" + }, + "maxResults": { + "shape": "MaxResultsType", + "documentation": "Maximum amount of items to return when listing source server post migration custom actions.
" + }, + "nextToken": { + "shape": "PaginationToken", + "documentation": "Next token to use when listing source server post migration custom actions.
" + }, + "sourceServerID": { + "shape": "SourceServerID", + "documentation": "Source server ID.
" + } + } + }, + "ListSourceServerActionsResponse": { + "type": "structure", + "members": { + "items": { + "shape": "SourceServerActionDocuments", + "documentation": "List of source server post migration custom actions.
" + }, + "nextToken": { + "shape": "PaginationToken", + "documentation": "Next token returned when listing source server post migration custom actions.
" + } + } + }, "ListTagsForResourceRequest": { "type": "structure", "required": [ @@ -2210,6 +3362,87 @@ } } }, + "ListTemplateActionsRequest": { + "type": "structure", + "required": [ + "launchConfigurationTemplateID" + ], + "members": { + "filters": { + "shape": "TemplateActionsRequestFilters", + "documentation": "Filters to apply when listing template post migration custom actions.
" + }, + "launchConfigurationTemplateID": { + "shape": "LaunchConfigurationTemplateID", + "documentation": "Launch configuration template ID.
" + }, + "maxResults": { + "shape": "MaxResultsType", + "documentation": "Maximum amount of items to return when listing template post migration custom actions.
" + }, + "nextToken": { + "shape": "PaginationToken", + "documentation": "Next token to use when listing template post migration custom actions.
" + } + } + }, + "ListTemplateActionsResponse": { + "type": "structure", + "members": { + "items": { + "shape": "TemplateActionDocuments", + "documentation": "List of template post migration custom actions.
" + }, + "nextToken": { + "shape": "PaginationToken", + "documentation": "Next token returned when listing template post migration custom actions.
" + } + } + }, + "ListWavesRequest": { + "type": "structure", + "members": { + "filters": { + "shape": "ListWavesRequestFilters", + "documentation": "Waves list filters.
" + }, + "maxResults": { + "shape": "MaxResultsType", + "documentation": "Maximum results to return when listing waves.
" + }, + "nextToken": { + "shape": "PaginationToken", + "documentation": "Request next token.
" + } + } + }, + "ListWavesRequestFilters": { + "type": "structure", + "members": { + "isArchived": { + "shape": "Boolean", + "documentation": "Filter waves list by archival status.
" + }, + "waveIDs": { + "shape": "WaveIDsFilter", + "documentation": "Filter waves list by wave ID.
" + } + }, + "documentation": "Waves list filters.
" + }, + "ListWavesResponse": { + "type": "structure", + "members": { + "items": { + "shape": "WavesList", + "documentation": "Waves list.
" + }, + "nextToken": { + "shape": "PaginationToken", + "documentation": "Response next token.
" + } + } + }, "MarkAsArchivedRequest": { "type": "structure", "required": [ @@ -2222,6 +3455,11 @@ } } }, + "MaxResultsType": { + "type": "integer", + "max": 1000, + "min": 1 + }, "NetworkInterface": { "type": "structure", "members": { @@ -2258,6 +3496,15 @@ }, "documentation": "Operating System.
" }, + "OperatingSystemString": { + "type": "string", + "pattern": "^(linux|windows)$" + }, + "OrderType": { + "type": "integer", + "max": 10000, + "min": 1001 + }, "PaginationToken": { "type": "string", "max": 2048, @@ -2275,11 +3522,11 @@ }, "launchedEc2InstanceID": { "shape": "EC2InstanceID", - "documentation": "Participating server Source Server ID.
" + "documentation": "Participating server's launched ec2 instance ID.
" }, "postLaunchActionsStatus": { "shape": "PostLaunchActionsStatus", - "documentation": "Participating server Source Server ID.
" + "documentation": "Participating server's Post Launch Actions Status.
" }, "sourceServerID": { "shape": "SourceServerID", @@ -2311,53 +3558,204 @@ "members": { "cloudWatchLogGroupName": { "shape": "CloudWatchLogGroupName", - "documentation": "Server participating in Job.
" + "documentation": "AWS Systems Manager Command's CloudWatch log group name.
" }, "deployment": { "shape": "PostLaunchActionsDeploymentType", - "documentation": "Server participating in Job.
" + "documentation": "Deployment type in which AWS Systems Manager Documents will be executed.
" }, "s3LogBucket": { "shape": "S3LogBucketName", - "documentation": "Server participating in Job.
" + "documentation": "AWS Systems Manager Command's logs S3 log bucket.
" }, "s3OutputKeyPrefix": { "shape": "BoundedString", - "documentation": "Server participating in Job.
" + "documentation": "AWS Systems Manager Command's logs S3 output key prefix.
" + }, + "ssmDocuments": { + "shape": "SsmDocuments", + "documentation": "AWS Systems Manager Documents.
" + } + }, + "documentation": "Post Launch Actions to executed on the Test or Cutover instance.
" + }, + "PostLaunchActionsDeploymentType": { + "type": "string", + "enum": [ + "TEST_AND_CUTOVER", + "CUTOVER_ONLY", + "TEST_ONLY" + ] + }, + "PostLaunchActionsLaunchStatusList": { + "type": "list", + "member": { + "shape": "JobPostLaunchActionsLaunchStatus" + } + }, + "PostLaunchActionsStatus": { + "type": "structure", + "members": { + "postLaunchActionsLaunchStatusList": { + "shape": "PostLaunchActionsLaunchStatusList", + "documentation": "List of Post Launch Action status.
" + }, + "ssmAgentDiscoveryDatetime": { + "shape": "ISO8601DatetimeString", + "documentation": "Time where the AWS Systems Manager was detected as running on the Test or Cutover instance.
" + } + }, + "documentation": "Status of the Post Launch Actions running on the Test or Cutover instance.
" + }, + "PutSourceServerActionRequest": { + "type": "structure", + "required": [ + "actionID", + "actionName", + "documentIdentifier", + "order", + "sourceServerID" + ], + "members": { + "actionID": { + "shape": "ActionID", + "documentation": "Source server post migration custom action ID.
" + }, + "actionName": { + "shape": "ActionName", + "documentation": "Source server post migration custom action name.
" + }, + "active": { + "shape": "Boolean", + "documentation": "Source server post migration custom action active status.
" + }, + "documentIdentifier": { + "shape": "BoundedString", + "documentation": "Source server post migration custom action document identifier.
" + }, + "documentVersion": { + "shape": "DocumentVersion", + "documentation": "Source server post migration custom action document version.
" + }, + "mustSucceedForCutover": { + "shape": "Boolean", + "documentation": "Source server post migration custom action must succeed for cutover.
" + }, + "order": { + "shape": "OrderType", + "documentation": "Source server post migration custom action order.
" + }, + "parameters": { + "shape": "SsmDocumentParameters", + "documentation": "Source server post migration custom action parameters.
" + }, + "sourceServerID": { + "shape": "SourceServerID", + "documentation": "Source server ID.
" + }, + "timeoutSeconds": { + "shape": "StrictlyPositiveInteger", + "documentation": "Source server post migration custom action timeout in seconds.
" + } + } + }, + "PutTemplateActionRequest": { + "type": "structure", + "required": [ + "actionID", + "actionName", + "documentIdentifier", + "launchConfigurationTemplateID", + "order" + ], + "members": { + "actionID": { + "shape": "ActionID", + "documentation": "Template post migration custom action ID.
" + }, + "actionName": { + "shape": "BoundedString", + "documentation": "Template post migration custom action name.
" + }, + "active": { + "shape": "Boolean", + "documentation": "Template post migration custom action active status.
" + }, + "documentIdentifier": { + "shape": "BoundedString", + "documentation": "Template post migration custom action document identifier.
" + }, + "documentVersion": { + "shape": "DocumentVersion", + "documentation": "Template post migration custom action document version.
" + }, + "launchConfigurationTemplateID": { + "shape": "LaunchConfigurationTemplateID", + "documentation": "Launch configuration template ID.
" + }, + "mustSucceedForCutover": { + "shape": "Boolean", + "documentation": "Template post migration custom action must succeed for cutover.
" + }, + "operatingSystem": { + "shape": "OperatingSystemString", + "documentation": "Operating system eligible for this template post migration custom action.
" + }, + "order": { + "shape": "OrderType", + "documentation": "Template post migration custom action order.
" + }, + "parameters": { + "shape": "SsmDocumentParameters", + "documentation": "Template post migration custom action parameters.
" }, - "ssmDocuments": { - "shape": "SsmDocuments", - "documentation": "Server participating in Job.
" + "timeoutSeconds": { + "shape": "StrictlyPositiveInteger", + "documentation": "Template post migration custom action timeout in seconds.
" } - }, - "documentation": "Server participating in Job.
" - }, - "PostLaunchActionsDeploymentType": { - "type": "string", - "enum": [ - "TEST_AND_CUTOVER", - "CUTOVER_ONLY" - ] + } }, - "PostLaunchActionsLaunchStatusList": { - "type": "list", - "member": { - "shape": "JobPostLaunchActionsLaunchStatus" + "RemoveSourceServerActionRequest": { + "type": "structure", + "required": [ + "actionID", + "sourceServerID" + ], + "members": { + "actionID": { + "shape": "ActionID", + "documentation": "Source server post migration custom action ID to remove.
" + }, + "sourceServerID": { + "shape": "SourceServerID", + "documentation": "Source server ID of the post migration custom action to remove.
" + } } }, - "PostLaunchActionsStatus": { + "RemoveSourceServerActionResponse": { + "type": "structure", + "members": {} + }, + "RemoveTemplateActionRequest": { "type": "structure", + "required": [ + "actionID", + "launchConfigurationTemplateID" + ], "members": { - "postLaunchActionsLaunchStatusList": { - "shape": "PostLaunchActionsLaunchStatusList", - "documentation": "Server participating in Job.
" + "actionID": { + "shape": "ActionID", + "documentation": "Template post migration custom action ID to remove.
" }, - "ssmAgentDiscoveryDatetime": { - "shape": "ISO8601DatetimeString", - "documentation": "Server participating in Job.
" + "launchConfigurationTemplateID": { + "shape": "LaunchConfigurationTemplateID", + "documentation": "Launch configuration template ID of the post migration custom action to remove.
" } - }, - "documentation": "Server participating in Job.
" + } + }, + "RemoveTemplateActionResponse": { + "type": "structure", + "members": {} }, "ReplicationConfiguration": { "type": "structure", @@ -2673,6 +4071,10 @@ "SourceServer": { "type": "structure", "members": { + "applicationID": { + "shape": "ApplicationID", + "documentation": "Source server application ID.
" + }, "arn": { "shape": "ARN", "documentation": "Source server ARN.
" @@ -2715,6 +4117,65 @@ } } }, + "SourceServerActionDocument": { + "type": "structure", + "members": { + "actionID": { + "shape": "ActionID", + "documentation": "Source server post migration custom action ID.
" + }, + "actionName": { + "shape": "ActionName", + "documentation": "Source server post migration custom action name.
" + }, + "active": { + "shape": "Boolean", + "documentation": "Source server post migration custom action active status.
" + }, + "documentIdentifier": { + "shape": "BoundedString", + "documentation": "Source server post migration custom action document identifier.
" + }, + "documentVersion": { + "shape": "DocumentVersion", + "documentation": "Source server post migration custom action document version.
" + }, + "mustSucceedForCutover": { + "shape": "Boolean", + "documentation": "Source server post migration custom action must succeed for cutover.
" + }, + "order": { + "shape": "OrderType", + "documentation": "Source server post migration custom action order.
" + }, + "parameters": { + "shape": "SsmDocumentParameters", + "documentation": "Source server post migration custom action parameters.
" + }, + "timeoutSeconds": { + "shape": "StrictlyPositiveInteger", + "documentation": "Source server post migration custom action timeout in seconds.
" + } + } + }, + "SourceServerActionDocuments": { + "type": "list", + "member": { + "shape": "SourceServerActionDocument" + }, + "max": 100, + "min": 0 + }, + "SourceServerActionsRequestFilters": { + "type": "structure", + "members": { + "actionIDs": { + "shape": "ActionIDs", + "documentation": "Action IDs to filter source server post migration custom actions by.
" + } + }, + "documentation": "Source server post migration custom action filters.
" + }, "SourceServerID": { "type": "string", "max": 19, @@ -2736,26 +4197,26 @@ "members": { "actionName": { "shape": "BoundedString", - "documentation": "Source server replication type.
" + "documentation": "User-friendly name for the AWS Systems Manager Document.
" }, "mustSucceedForCutover": { "shape": "Boolean", - "documentation": "Source server replication type.
" + "documentation": "If true, Cutover will not be enabled if the document has failed.
" }, "parameters": { "shape": "SsmDocumentParameters", - "documentation": "Source server replication type.
" + "documentation": "AWS Systems Manager Document parameters.
" }, "ssmDocumentName": { "shape": "SsmDocumentName", - "documentation": "Source server replication type.
" + "documentation": "AWS Systems Manager Document name or full ARN.
" }, "timeoutSeconds": { "shape": "StrictlyPositiveInteger", - "documentation": "Source server replication type.
" + "documentation": "AWS Systems Manager Document timeout seconds.
" } }, - "documentation": "Source server replication type.
" + "documentation": "AWS Systems Manager Document.
" }, "SsmDocumentName": { "type": "string", @@ -2777,7 +4238,7 @@ "value": { "shape": "SsmParameterStoreParameters" }, - "max": 10, + "max": 20, "min": 0 }, "SsmDocumentType": { @@ -2804,14 +4265,14 @@ "members": { "parameterName": { "shape": "SsmParameterStoreParameterName", - "documentation": "Source server replication type.
" + "documentation": "AWS Systems Manager Parameter Store parameter name.
" }, "parameterType": { "shape": "SsmParameterStoreParameterType", - "documentation": "Source server replication type.
" + "documentation": "AWS Systems Manager Parameter Store parameter type.
" } }, - "documentation": "Source server replication type.
" + "documentation": "AWS Systems Manager Parameter Store parameter.
" }, "SsmParameterStoreParameterName": { "type": "string", @@ -2974,6 +4435,69 @@ "BASIC" ] }, + "TemplateActionDocument": { + "type": "structure", + "members": { + "actionID": { + "shape": "ActionID", + "documentation": "Template post migration custom action ID.
" + }, + "actionName": { + "shape": "BoundedString", + "documentation": "Template post migration custom action name.
" + }, + "active": { + "shape": "Boolean", + "documentation": "Template post migration custom action active status.
" + }, + "documentIdentifier": { + "shape": "BoundedString", + "documentation": "Template post migration custom action document identifier.
" + }, + "documentVersion": { + "shape": "DocumentVersion", + "documentation": "Template post migration custom action document version.
" + }, + "mustSucceedForCutover": { + "shape": "Boolean", + "documentation": "Template post migration custom action must succeed for cutover.
" + }, + "operatingSystem": { + "shape": "OperatingSystemString", + "documentation": "Operating system eligible for this template post migration custom action.
" + }, + "order": { + "shape": "OrderType", + "documentation": "Template post migration custom action order.
" + }, + "parameters": { + "shape": "SsmDocumentParameters", + "documentation": "Template post migration custom action parameters.
" + }, + "timeoutSeconds": { + "shape": "StrictlyPositiveInteger", + "documentation": "Template post migration custom action timeout in seconds.
" + } + } + }, + "TemplateActionDocuments": { + "type": "list", + "member": { + "shape": "TemplateActionDocument" + }, + "max": 100, + "min": 0 + }, + "TemplateActionsRequestFilters": { + "type": "structure", + "members": { + "actionIDs": { + "shape": "ActionIDs", + "documentation": "Action IDs to filter template post migration custom actions by.
" + } + }, + "documentation": "Template post migration custom action filters.
" + }, "TerminateTargetInstancesRequest": { "type": "structure", "required": [ @@ -3007,6 +4531,30 @@ } } }, + "UnarchiveApplicationRequest": { + "type": "structure", + "required": [ + "applicationID" + ], + "members": { + "applicationID": { + "shape": "ApplicationID", + "documentation": "Application ID.
" + } + } + }, + "UnarchiveWaveRequest": { + "type": "structure", + "required": [ + "waveID" + ], + "members": { + "waveID": { + "shape": "WaveID", + "documentation": "Wave ID.
" + } + } + }, "UntagResourceRequest": { "type": "structure", "required": [ @@ -3028,6 +4576,26 @@ } } }, + "UpdateApplicationRequest": { + "type": "structure", + "required": [ + "applicationID" + ], + "members": { + "applicationID": { + "shape": "ApplicationID", + "documentation": "Application ID.
" + }, + "description": { + "shape": "ApplicationDescription", + "documentation": "Application description.
" + }, + "name": { + "shape": "ApplicationName", + "documentation": "Application name.
" + } + } + }, "UpdateLaunchConfigurationRequest": { "type": "structure", "required": [ @@ -3046,6 +4614,10 @@ "shape": "Boolean", "documentation": "Update Launch configuration copy Tags request.
" }, + "enableMapAutoTagging": { + "shape": "Boolean", + "documentation": "Enable map auto tagging.
" + }, "launchDisposition": { "shape": "LaunchDisposition", "documentation": "Update Launch configuration launch disposition request.
" @@ -3054,6 +4626,10 @@ "shape": "Licensing", "documentation": "Update Launch configuration licensing request.
" }, + "mapAutoTaggingMpeID": { + "shape": "TagValue", + "documentation": "Launch configuration map auto tagging MPE ID.
" + }, "name": { "shape": "SmallBoundedString", "documentation": "Update Launch configuration name request.
" @@ -3077,13 +4653,60 @@ "launchConfigurationTemplateID" ], "members": { + "associatePublicIpAddress": { + "shape": "Boolean", + "documentation": "Associate public Ip address.
" + }, + "bootMode": { + "shape": "BootMode", + "documentation": "Launch configuration template boot mode.
" + }, + "copyPrivateIp": { + "shape": "Boolean", + "documentation": "Copy private Ip.
" + }, + "copyTags": { + "shape": "Boolean", + "documentation": "Copy tags.
" + }, + "enableMapAutoTagging": { + "shape": "Boolean", + "documentation": "Enable map auto tagging.
" + }, + "largeVolumeConf": { + "shape": "LaunchTemplateDiskConf", + "documentation": "Large volume config.
" + }, "launchConfigurationTemplateID": { "shape": "LaunchConfigurationTemplateID", - "documentation": "Update Launch configuration Target instance right sizing request.
" + "documentation": "Launch Configuration Template ID.
" + }, + "launchDisposition": { + "shape": "LaunchDisposition", + "documentation": "Launch disposition.
" + }, + "licensing": { + "shape": "Licensing" + }, + "mapAutoTaggingMpeID": { + "shape": "TagValue", + "documentation": "Launch configuration template map auto tagging MPE ID.
" }, "postLaunchActions": { "shape": "PostLaunchActions", - "documentation": "Update Launch configuration Target instance right sizing request.
" + "documentation": "Post Launch Action to execute on the Test or Cutover instance.
" + }, + "smallVolumeConf": { + "shape": "LaunchTemplateDiskConf", + "documentation": "Small volume config.
" + }, + "smallVolumeMaxSize": { + "shape": "PositiveInteger", + "documentation": "Small volume maximum size.
" + }, + "targetInstanceTypeRightSizingMethod": { + "shape": "TargetInstanceTypeRightSizingMethod", + "documentation": "Target instance type right-sizing method.
" } } }, @@ -3236,6 +4859,26 @@ } } }, + "UpdateWaveRequest": { + "type": "structure", + "required": [ + "waveID" + ], + "members": { + "description": { + "shape": "WaveDescription", + "documentation": "Wave description.
" + }, + "name": { + "shape": "WaveName", + "documentation": "Wave name.
" + }, + "waveID": { + "shape": "WaveID", + "documentation": "Wave ID.
" + } + } + }, "VcenterClient": { "type": "structure", "members": { @@ -3285,6 +4928,133 @@ "member": { "shape": "VcenterClient" } + }, + "VolumeType": { + "type": "string", + "enum": [ + "io1", + "io2", + "gp3", + "gp2", + "st1", + "sc1", + "standard" + ] + }, + "Wave": { + "type": "structure", + "members": { + "arn": { + "shape": "ARN", + "documentation": "Wave ARN.
" + }, + "creationDateTime": { + "shape": "ISO8601DatetimeString", + "documentation": "Wave creation dateTime.
" + }, + "description": { + "shape": "WaveDescription", + "documentation": "Wave description.
" + }, + "isArchived": { + "shape": "Boolean", + "documentation": "Wave archival status.
" + }, + "lastModifiedDateTime": { + "shape": "ISO8601DatetimeString", + "documentation": "Wave last modified dateTime.
" + }, + "name": { + "shape": "WaveName", + "documentation": "Wave name.
" + }, + "tags": { + "shape": "TagsMap", + "documentation": "Wave tags.
" + }, + "waveAggregatedStatus": { + "shape": "WaveAggregatedStatus", + "documentation": "Wave aggregated status.
" + }, + "waveID": { + "shape": "WaveID", + "documentation": "Wave ID.
" + } + } + }, + "WaveAggregatedStatus": { + "type": "structure", + "members": { + "healthStatus": { + "shape": "WaveHealthStatus", + "documentation": "Wave aggregated status health status.
" + }, + "lastUpdateDateTime": { + "shape": "ISO8601DatetimeString", + "documentation": "Wave aggregated status last update dateTime.
" + }, + "progressStatus": { + "shape": "WaveProgressStatus", + "documentation": "Wave aggregated status progress status.
" + }, + "replicationStartedDateTime": { + "shape": "ISO8601DatetimeString", + "documentation": "DateTime marking when the first source server in the wave started replication.
" + }, + "totalApplications": { + "shape": "PositiveInteger", + "documentation": "Wave aggregated status total applications amount.
" + } + }, + "documentation": "Wave aggregated status.
" + }, + "WaveDescription": { + "type": "string", + "max": 600, + "min": 0, + "pattern": "^[^\\x00]*$" + }, + "WaveHealthStatus": { + "type": "string", + "enum": [ + "HEALTHY", + "LAGGING", + "ERROR" + ] + }, + "WaveID": { + "type": "string", + "max": 22, + "min": 22, + "pattern": "^wave-[0-9a-zA-Z]{17}$" + }, + "WaveIDsFilter": { + "type": "list", + "member": { + "shape": "WaveID" + }, + "max": 200, + "min": 0 + }, + "WaveName": { + "type": "string", + "max": 256, + "min": 1, + "pattern": "^[^\\s\\x00]( *[^\\s\\x00])*$" + }, + "WaveProgressStatus": { + "type": "string", + "enum": [ + "NOT_STARTED", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "WavesList": { + "type": "list", + "member": { + "shape": "Wave" + } } }, "documentation": "The Application Migration Service service.
" diff --git a/apis/mgn-2020-02-26.paginators.json b/apis/mgn-2020-02-26.paginators.json index b718aa78ba..04af579e20 100644 --- a/apis/mgn-2020-02-26.paginators.json +++ b/apis/mgn-2020-02-26.paginators.json @@ -35,6 +35,30 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "items" + }, + "ListApplications": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListSourceServerActions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListTemplateActions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" + }, + "ListWaves": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "items" } } } diff --git a/apis/monitoring-2010-08-01.min.json b/apis/monitoring-2010-08-01.min.json index 3c44e1b03c..63418fb0df 100644 --- a/apis/monitoring-2010-08-01.min.json +++ b/apis/monitoring-2010-08-01.min.json @@ -951,7 +951,11 @@ } }, "NextToken": {}, - "RecentlyActive": {} + "RecentlyActive": {}, + "IncludeLinkedAccounts": { + "type": "boolean" + }, + "OwningAccount": {} } }, "output": { @@ -964,11 +968,16 @@ "shape": "Si" } }, - "NextToken": {} + "NextToken": {}, + "OwningAccounts": { + "type": "list", + "member": {} + } }, "xmlOrder": [ "Metrics", - "NextToken" + "NextToken", + "OwningAccounts" ] } }, @@ -987,7 +996,7 @@ "type": "structure", "members": { "Tags": { - "shape": "S5n" + "shape": "S5p" } } } @@ -1054,7 +1063,7 @@ "shape": "S1t" }, "Tags": { - "shape": "S5n" + "shape": "S5p" }, "ActionsSuppressor": {}, "ActionsSuppressorWaitPeriod": { @@ -1107,7 +1116,7 @@ "RuleState": {}, "RuleDefinition": {}, "Tags": { - "shape": "S5n" + "shape": "S5p" } } }, @@ -1136,7 +1145,7 @@ "TemplateName": {}, "ResourceARN": {}, "Tags": { - "shape": "S5n" + "shape": "S5p" } } } @@ -1203,7 +1212,7 @@ "shape": "Se" }, "Tags": { - "shape": "S5n" + "shape": "S5p" }, "ThresholdMetricId": {} } @@ -1302,7 +1311,7 @@ "RoleArn": {}, "OutputFormat": {}, "Tags": { - "shape": "S5n" + "shape": "S5p" }, "StatisticsConfigurations": { "shape": "S4m" @@ -1341,7 +1350,7 @@ ], "members": { "Names": { - "shape": "S6i" + "shape": "S6k" } } }, @@ -1359,7 +1368,7 @@ ], "members": { "Names": { - "shape": "S6i" + "shape": "S6k" } } }, @@ -1379,7 +1388,7 @@ "members": { "ResourceARN": {}, "Tags": { - "shape": "S5n" + "shape": "S5p" } } }, @@ -1696,7 +1705,7 @@ } } }, - "S5n": { + "S5p": { "type": "list", "member": { "type": "structure", @@ -1710,7 +1719,7 @@ } } }, - "S6i": { + "S6k": { "type": "list", "member": {} } diff --git a/apis/monitoring-2010-08-01.normal.json b/apis/monitoring-2010-08-01.normal.json index 2d6d46defb..700fdea870 100644 --- a/apis/monitoring-2010-08-01.normal.json +++ b/apis/monitoring-2010-08-01.normal.json @@ -26,7 +26,7 @@ "shape": "ResourceNotFound" } ], - "documentation": "Deletes the specified alarms. You can delete up to 100 alarms in one operation. However, this total can include no more than one composite alarm. For example, you could delete 99 metric alarms and one composite alarms with one operation, but you can't delete two composite alarms with one operation.
In the event of an error, no alarms are deleted.
It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.
To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule
of one of the alarms to False
.
Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.
Deletes the specified alarms. You can delete up to 100 alarms in one operation. However, this total can include no more than one composite alarm. For example, you could delete 99 metric alarms and one composite alarms with one operation, but you can't delete two composite alarms with one operation.
In the event of an error, no alarms are deleted.
It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.
To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule
of one of the alarms to false
.
Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.
List the specified metrics. You can use the returned metrics with GetMetricData or GetMetricStatistics to obtain statistical data.
Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.
After you create a metric, allow up to 15 minutes before the metric appears. You can see statistics about the metric sooner by using GetMetricData or GetMetricStatistics.
ListMetrics
doesn't return information about metrics if those metrics haven't reported data in the past two weeks. To retrieve those metrics, use GetMetricData or GetMetricStatistics.
List the specified metrics. You can use the returned metrics with GetMetricData or GetMetricStatistics to get statistical data.
Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.
After you create a metric, allow up to 15 minutes for the metric to appear. To see metric statistics sooner, use GetMetricData or GetMetricStatistics.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view metrics from the linked source accounts. For more information, see CloudWatch cross-account observability.
ListMetrics
doesn't return information about metrics if those metrics haven't reported data in the past two weeks. To retrieve those metrics, use GetMetricData or GetMetricStatistics.
Creates or updates a composite alarm. When you create a composite alarm, you specify a rule expression for the alarm that takes into account the alarm states of other alarms that you have created. The composite alarm goes into ALARM state only if all conditions of the rule are met.
The alarms specified in a composite alarm's rule expression can include metric alarms and other composite alarms. The rule expression of a composite alarm can include as many as 100 underlying alarms. Any single alarm can be included in the rule expressions of as many as 150 composite alarms.
Using composite alarms can reduce alarm noise. You can create multiple metric alarms, and also create a composite alarm and set up alerts only for the composite alarm. For example, you could create a composite alarm that goes into ALARM state only when more than one of the underlying metric alarms are in ALARM state.
Currently, the only alarm actions that can be taken by composite alarms are notifying SNS topics.
It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.
To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule
of one of the alarms to False
.
Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.
When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA
. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed. For a composite alarm, this initial time after creation is the only time that the alarm can be in INSUFFICIENT_DATA
state.
When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.
To use this operation, you must be signed on with the cloudwatch:PutCompositeAlarm
permission that is scoped to *
. You can't create a composite alarms if your cloudwatch:PutCompositeAlarm
permission has a narrower scope.
If you are an IAM user, you must have iam:CreateServiceLinkedRole
to create a composite alarm that has Systems Manager OpsItem actions.
Creates or updates a composite alarm. When you create a composite alarm, you specify a rule expression for the alarm that takes into account the alarm states of other alarms that you have created. The composite alarm goes into ALARM state only if all conditions of the rule are met.
The alarms specified in a composite alarm's rule expression can include metric alarms and other composite alarms. The rule expression of a composite alarm can include as many as 100 underlying alarms. Any single alarm can be included in the rule expressions of as many as 150 composite alarms.
Using composite alarms can reduce alarm noise. You can create multiple metric alarms, and also create a composite alarm and set up alerts only for the composite alarm. For example, you could create a composite alarm that goes into ALARM state only when more than one of the underlying metric alarms are in ALARM state.
Currently, the only alarm actions that can be taken by composite alarms are notifying SNS topics.
It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.
To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule
of one of the alarms to false
.
Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.
When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA
. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed. For a composite alarm, this initial time after creation is the only time that the alarm can be in INSUFFICIENT_DATA
state.
When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.
To use this operation, you must be signed on with the cloudwatch:PutCompositeAlarm
permission that is scoped to *
. You can't create a composite alarm if your cloudwatch:PutCompositeAlarm
permission has a narrower scope.
If you are an IAM user, you must have iam:CreateServiceLinkedRole
to create a composite alarm that has Systems Manager OpsItem actions.
Creates or updates a metric stream. Metric streams can automatically stream CloudWatch metrics to Amazon Web Services destinations including Amazon S3 and to many third-party solutions.
For more information, see Using Metric Streams.
To create a metric stream, you must be logged on to an account that has the iam:PassRole
permission and either the CloudWatchFullAccess
policy or the cloudwatch:PutMetricStream
permission.
When you create or update a metric stream, you choose one of the following:
Stream metrics from all metric namespaces in the account.
Stream metrics from all metric namespaces in the account, except for the namespaces that you list in ExcludeFilters
.
Stream metrics from only the metric namespaces that you list in IncludeFilters
.
By default, a metric stream always sends the MAX
, MIN
, SUM
, and SAMPLECOUNT
statistics for each metric that is streamed. You can use the StatisticsConfigurations
parameter to have the metric stream also send additional statistics in the stream. Streaming additional statistics incurs additional costs. For more information, see Amazon CloudWatch Pricing.
When you use PutMetricStream
to create a new metric stream, the stream is created in the running
state. If you use it to update an existing stream, the state of the stream is not changed.
Creates or updates a metric stream. Metric streams can automatically stream CloudWatch metrics to Amazon Web Services destinations, including Amazon S3, and to many third-party solutions.
For more information, see Using Metric Streams.
To create a metric stream, you must be signed in to an account that has the iam:PassRole
permission and either the CloudWatchFullAccess
policy or the cloudwatch:PutMetricStream
permission.
When you create or update a metric stream, you choose one of the following:
Stream metrics from all metric namespaces in the account.
Stream metrics from all metric namespaces in the account, except for the namespaces that you list in ExcludeFilters
.
Stream metrics from only the metric namespaces that you list in IncludeFilters
.
By default, a metric stream always sends the MAX
, MIN
, SUM
, and SAMPLECOUNT
statistics for each metric that is streamed. You can use the StatisticsConfigurations
parameter to have the metric stream send additional statistics in the stream. Streaming additional statistics incurs additional costs. For more information, see Amazon CloudWatch Pricing.
When you use PutMetricStream
to create a new metric stream, the stream is created in the running
state. If you use it to update an existing stream, the state of the stream is not changed.
The ARN of the Amazon Kinesis Firehose delivery stream that is used by this metric stream.
" + "documentation": "The ARN of the Amazon Kinesis Data Firehose delivery stream that is used by this metric stream.
" }, "RoleArn": { "shape": "AmazonResourceName", @@ -2215,6 +2215,9 @@ "max": 255, "min": 1 }, + "IncludeLinkedAccounts": { + "type": "boolean" + }, "InsightRule": { "type": "structure", "required": [ @@ -2561,6 +2564,14 @@ "RecentlyActive": { "shape": "RecentlyActive", "documentation": "To filter the results to show only metrics that have had data points published in the past three hours, specify this parameter with a value of PT3H
. This is the only valid value for this parameter.
The results that are returned are an approximation of the value you specify. There is a low probability that the returned results include metrics with last published data as much as 40 minutes more than the specified time interval.
" + }, + "IncludeLinkedAccounts": { + "shape": "IncludeLinkedAccounts", + "documentation": "If you are using this operation in a monitoring account, specify true
to include metrics from source accounts in the returned data.
The default is false
.
When you use this operation in a monitoring account, use this field to return metrics only from one source account. To do so, specify that source account ID in this field, and also specify true
for IncludeLinkedAccounts
.
The token that marks the start of the next batch of returned results.
" + }, + "OwningAccounts": { + "shape": "OwningAccounts", + "documentation": "If you are using this operation in a monitoring account, this array contains the account IDs of the source accounts where the metrics in the returned data are from.
This field is a 1:1 mapping between each metric that is returned and the ID of the owning account.
" } }, "xmlOrder": [ "Metrics", - "NextToken" + "NextToken", + "OwningAccounts" ] }, "ListTagsForResourceInput": { @@ -2912,7 +2928,7 @@ }, "ReturnData": { "shape": "ReturnData", - "documentation": "When used in GetMetricData
, this option indicates whether to return the timestamps and raw data values of this metric. If you are performing this call just to do math expressions and do not also need the raw data returned, you can specify False
. If you omit this, the default of True
is used.
When used in PutMetricAlarm
, specify True
for the one expression result to use as the alarm. For all other metrics and expressions in the same PutMetricAlarm
operation, specify ReturnData
as False.
When used in GetMetricData
, this option indicates whether to return the timestamps and raw data values of this metric. If you are performing this call just to do math expressions and do not also need the raw data returned, you can specify false
. If you omit this, the default of true
is used.
When used in PutMetricAlarm
, specify true
for the one expression result to use as the alarm. For all other metrics and expressions in the same PutMetricAlarm
operation, specify ReturnData
as false.
The ID of the account where the metrics are located, if this is a cross-account alarm.
Use this field only for PutMetricAlarm
operations. It is not used in GetMetricData
operations.
The ID of the account where the metrics are located.
If you are performing a GetMetricData
operation in a monitoring account, use this to specify which account to retrieve this metric from.
If you are performing a PutMetricAlarm
operation, use this to specify which account contains the metric that the alarm is watching.
This structure is used in both GetMetricData
and PutMetricAlarm
. The supported use of this structure is different for those two operations.
When used in GetMetricData
, it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a Metrics Insights query or a math expression. A single GetMetricData
call can include up to 500 MetricDataQuery
structures.
When used in PutMetricAlarm
, it enables you to create an alarm based on a metric math expression. Each MetricDataQuery
in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm
call can include up to 20 MetricDataQuery
structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat
parameter to retrieve a metric, and as many as 10 structures that contain the Expression
parameter to perform a math expression. Of those Expression
structures, one must have True
as the value for ReturnData
. The result of this expression is the value the alarm watches.
Any expression used in a PutMetricAlarm
operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.
Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData
operation or a PutMetricAlarm
operation. These differences are explained in the following parameter list.
This structure is used in both GetMetricData
and PutMetricAlarm
. The supported use of this structure is different for those two operations.
When used in GetMetricData
, it indicates the metric data to return, and whether this call is just retrieving a batch set of data for one metric, or is performing a Metrics Insights query or a math expression. A single GetMetricData
call can include up to 500 MetricDataQuery
structures.
When used in PutMetricAlarm
, it enables you to create an alarm based on a metric math expression. Each MetricDataQuery
in the array specifies either a metric to retrieve, or a math expression to be performed on retrieved metrics. A single PutMetricAlarm
call can include up to 20 MetricDataQuery
structures in the array. The 20 structures can include as many as 10 structures that contain a MetricStat
parameter to retrieve a metric, and as many as 10 structures that contain the Expression
parameter to perform a math expression. Of those Expression
structures, one must have true
as the value for ReturnData
. The result of this expression is the value the alarm watches.
Any expression used in a PutMetricAlarm
operation must return a single time series. For more information, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.
Some of the parameters of this structure also have different uses whether you are using this structure in a GetMetricData
operation or a PutMetricAlarm
operation. These differences are explained in the following parameter list.
An array of metric data query structures that enables you to create an anomaly detector based on the result of a metric math expression. Each item in MetricDataQueries
gets a metric or performs a math expression. One item in MetricDataQueries
is the expression that provides the time series that the anomaly detector uses as input. Designate the expression by setting ReturnData
to True
for this object in the array. For all other expressions and metrics, set ReturnData
to False
. The designated expression must return a single time series.
An array of metric data query structures that enables you to create an anomaly detector based on the result of a metric math expression. Each item in MetricDataQueries
gets a metric or performs a math expression. One item in MetricDataQueries
is the expression that provides the time series that the anomaly detector uses as input. Designate the expression by setting ReturnData
to true
for this object in the array. For all other expressions and metrics, set ReturnData
to false
. The designated expression must return a single time series.
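To make the ReturnData semantics above concrete, here is a sketch of a GetMetricData call that fetches one series only as input to a math expression and returns just the expression result; the instance ID is a placeholder and the expression is illustrative:

```js
const AWS = require('aws-sdk');
const cloudwatch = new AWS.CloudWatch({ region: 'us-east-1' });

cloudwatch.getMetricData({
  StartTime: new Date(Date.now() - 3600 * 1000),
  EndTime: new Date(),
  MetricDataQueries: [
    {
      Id: 'cpu',
      MetricStat: {
        Metric: {
          Namespace: 'AWS/EC2',
          MetricName: 'CPUUtilization',
          Dimensions: [{ Name: 'InstanceId', Value: 'i-0123456789abcdef0' }] // placeholder
        },
        Period: 300,
        Stat: 'Average'
      },
      ReturnData: false // fetched only as input to the expression below
    },
    {
      Id: 'cpuPercentOfLimit',
      Expression: 'cpu / 80 * 100', // math expression over the series above
      ReturnData: true              // only this time series is returned
    }
  ]
}).promise()
  .then(data => console.log(JSON.stringify(data.MetricDataResults, null, 2)))
  .catch(console.error);
```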
Indicates the CloudWatch math expression that provides the time series the anomaly detector uses as input. The designated math expression must return a single time series.
" @@ -3227,6 +3243,12 @@ "OutputFormat": { "type": "string" }, + "OwningAccounts": { + "type": "list", + "member": { + "shape": "AccountId" + } + }, "PartialFailure": { "type": "structure", "members": { @@ -3565,11 +3587,11 @@ }, "FirehoseArn": { "shape": "AmazonResourceName", - "documentation": "The ARN of the Amazon Kinesis Firehose delivery stream to use for this metric stream. This Amazon Kinesis Firehose delivery stream must already exist and must be in the same account as the metric stream.
" + "documentation": "The ARN of the Amazon Kinesis Data Firehose delivery stream to use for this metric stream. This Amazon Kinesis Data Firehose delivery stream must already exist and must be in the same account as the metric stream.
" }, "RoleArn": { "shape": "AmazonResourceName", - "documentation": "The ARN of an IAM role that this metric stream will use to access Amazon Kinesis Firehose resources. This IAM role must already exist and must be in the same account as the metric stream. This IAM role must include the following permissions:
firehose:PutRecord
firehose:PutRecordBatch
The ARN of an IAM role that this metric stream will use to access Amazon Kinesis Data Firehose resources. This IAM role must already exist and must be in the same account as the metric stream. This IAM role must include the following permissions:
firehose:PutRecord
firehose:PutRecordBatch
By default, a metric stream always sends the MAX
, MIN
, SUM
, and SAMPLECOUNT
statistics for each metric that is streamed. You can use this parameter to have the metric stream also send additional statistics in the stream. This array can have up to 100 members.
For each entry in this array, you specify one or more metrics and the list of additional statistics to stream for those metrics. The additional statistics that you can stream depend on the stream's OutputFormat
. If the OutputFormat
is json
, you can stream any additional statistic that is supported by CloudWatch, listed in CloudWatch statistics definitions. If the OutputFormat
is opentelemetry0.7
, you can stream percentile statistics such as p95, p99.9 and so on.
By default, a metric stream always sends the MAX
, MIN
, SUM
, and SAMPLECOUNT
statistics for each metric that is streamed. You can use this parameter to have the metric stream also send additional statistics in the stream. This array can have up to 100 members.
For each entry in this array, you specify one or more metrics and the list of additional statistics to stream for those metrics. The additional statistics that you can stream depend on the stream's OutputFormat
. If the OutputFormat
is json
, you can stream any additional statistic that is supported by CloudWatch, listed in CloudWatch statistics definitions. If the OutputFormat
is opentelemetry0.7
, you can stream percentile statistics such as p95, p99.9, and so on.
Creates a link between a source account and a sink that you have created in a monitoring account.
Before you create a link, you must create a sink in the monitoring account and create a sink policy in that account. The sink policy must permit the source account to link to it. You can grant permission to source accounts by granting permission to an entire organization or to individual accounts.
For more information, see CreateSink and PutSinkPolicy.
Each monitoring account can be linked to as many as 100,000 source accounts.
Each source account can be linked to as many as five monitoring accounts.
" + }, + "CreateSink": { + "name": "CreateSink", + "http": { + "method": "POST", + "requestUri": "/CreateSink", + "responseCode": 200 + }, + "input": { + "shape": "CreateSinkInput" + }, + "output": { + "shape": "CreateSinkOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "ConflictException" + }, + { + "shape": "MissingRequiredParameterException" + }, + { + "shape": "ServiceQuotaExceededException" + }, + { + "shape": "InvalidParameterException" + } + ], + "documentation": "Use this to create a sink in the current account, so that it can be used as a monitoring account in CloudWatch cross-account observability. A sink is a resource that represents an attachment point in a monitoring account. Source accounts can link to the sink to send observability data.
After you create a sink, you must create a sink policy that allows source accounts to attach to it. For more information, see PutSinkPolicy.
Each account can contain one sink. If you delete a sink, you can then create a new one in that account.
" + }, + "DeleteLink": { + "name": "DeleteLink", + "http": { + "method": "POST", + "requestUri": "/DeleteLink", + "responseCode": 200 + }, + "input": { + "shape": "DeleteLinkInput" + }, + "output": { + "shape": "DeleteLinkOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "MissingRequiredParameterException" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Deletes a link between a monitoring account sink and a source account. You must run this operation in the source account.
" + }, + "DeleteSink": { + "name": "DeleteSink", + "http": { + "method": "POST", + "requestUri": "/DeleteSink", + "responseCode": 200 + }, + "input": { + "shape": "DeleteSinkInput" + }, + "output": { + "shape": "DeleteSinkOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "ConflictException" + }, + { + "shape": "MissingRequiredParameterException" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Deletes a sink. You must delete all links to a sink before you can delete that sink.
" + }, + "GetLink": { + "name": "GetLink", + "http": { + "method": "POST", + "requestUri": "/GetLink", + "responseCode": 200 + }, + "input": { + "shape": "GetLinkInput" + }, + "output": { + "shape": "GetLinkOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "MissingRequiredParameterException" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Returns complete information about one link.
To use this operation, provide the link ARN. To retrieve a list of link ARNs, use ListLinks.
" + }, + "GetSink": { + "name": "GetSink", + "http": { + "method": "POST", + "requestUri": "/GetSink", + "responseCode": 200 + }, + "input": { + "shape": "GetSinkInput" + }, + "output": { + "shape": "GetSinkOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "MissingRequiredParameterException" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Returns complete information about one monitoring account sink.
To use this operation, provide the sink ARN. To retrieve a list of sink ARNs, use ListSinks.
" + }, + "GetSinkPolicy": { + "name": "GetSinkPolicy", + "http": { + "method": "POST", + "requestUri": "/GetSinkPolicy", + "responseCode": 200 + }, + "input": { + "shape": "GetSinkPolicyInput" + }, + "output": { + "shape": "GetSinkPolicyOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "MissingRequiredParameterException" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Returns the current sink policy attached to this sink. The sink policy specifies what accounts can attach to this sink as source accounts, and what types of data they can share.
" + }, + "ListAttachedLinks": { + "name": "ListAttachedLinks", + "http": { + "method": "POST", + "requestUri": "/ListAttachedLinks", + "responseCode": 200 + }, + "input": { + "shape": "ListAttachedLinksInput" + }, + "output": { + "shape": "ListAttachedLinksOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "MissingRequiredParameterException" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Returns a list of source account links that are linked to this monitoring account sink.
To use this operation, provide the sink ARN. To retrieve a list of sink ARNs, use ListSinks.
To find a list of links for one source account, use ListLinks.
" + }, + "ListLinks": { + "name": "ListLinks", + "http": { + "method": "POST", + "requestUri": "/ListLinks", + "responseCode": 200 + }, + "input": { + "shape": "ListLinksInput" + }, + "output": { + "shape": "ListLinksOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Use this operation in a source account to return a list of links to monitoring account sinks that this source account has.
To find a list of links for one monitoring account sink, use ListAttachedLinks from within the monitoring account.
" + }, + "ListSinks": { + "name": "ListSinks", + "http": { + "method": "POST", + "requestUri": "/ListSinks", + "responseCode": 200 + }, + "input": { + "shape": "ListSinksInput" + }, + "output": { + "shape": "ListSinksOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Use this operation in a monitoring account to return the list of sinks created in that account.
" + }, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/tags/{ResourceArn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceInput" + }, + "output": { + "shape": "ListTagsForResourceOutput" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Displays the tags associated with a resource. Both sinks and links support tagging.
" + }, + "PutSinkPolicy": { + "name": "PutSinkPolicy", + "http": { + "method": "POST", + "requestUri": "/PutSinkPolicy", + "responseCode": 200 + }, + "input": { + "shape": "PutSinkPolicyInput" + }, + "output": { + "shape": "PutSinkPolicyOutput" + }, + "errors": [ + { + "shape": "InternalServiceFault" + }, + { + "shape": "MissingRequiredParameterException" + }, + { + "shape": "InvalidParameterException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Creates or updates the resource policy that grants permissions to source accounts to link to the monitoring account sink. When you create a sink policy, you can grant permissions to all accounts in an organization or to individual accounts.
You can also use a sink policy to limit the types of data that is shared. The three types that you can allow or deny are:
Metrics - Specify with AWS::CloudWatch::Metric
Log groups - Specify with AWS::Logs::LogGroup
Traces - Specify with AWS::XRay::Trace
See the examples in this section to see how to specify permitted source accounts and data types.
" + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "PUT", + "requestUri": "/tags/{ResourceArn}", + "responseCode": 200 + }, + "input": { + "shape": "TagResourceInput" + }, + "output": { + "shape": "TagResourceOutput" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "TooManyTagsException" + }, + { + "shape": "ResourceNotFoundException" + } + ], + "documentation": "Assigns one or more tags (key-value pairs) to the specified resource. Both sinks and links can be tagged.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
You can use the TagResource
action with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
You can associate as many as 50 tags with a resource.
Unlike tagging permissions in other Amazon Web Services services, to tag or untag links and sinks you must have the oam:ResourceTag
permission. The iam:ResourceTag
permission does not allow you to tag and untag links and sinks.
Removes one or more tags from the specified resource.
Unlike tagging permissions in other Amazon Web Services services, to tag or untag links and sinks you must have the oam:ResourceTag
permission. The iam:TagResource
permission does not allow you to tag and untag links and sinks.
Use this operation to change what types of data are shared from a source account to its linked monitoring account sink. You can't change the sink or change the monitoring account with this operation.
To update the list of tags associated with the sink, use TagResource.
" + } + }, + "shapes": { + "Arn": { + "type": "string", + "pattern": "arn:(\\w|-)+:oam:.+:.+:.+.*" + }, + "CreateLinkInput": { + "type": "structure", + "required": [ + "LabelTemplate", + "ResourceTypes", + "SinkIdentifier" + ], + "members": { + "LabelTemplate": { + "shape": "LabelTemplate", + "documentation": "Specify a friendly human-readable name to use to identify this source account when you are viewing data from it in the monitoring account.
You can use a custom label or use the following variables:
$AccountName
is the name of the account
$AccountEmail
is the globally unique email address of the account
$AccountEmailNoDomain
is the email address of the account without the domain name
An array of strings that define which types of data that the source account shares with the monitoring account.
" + }, + "SinkIdentifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the sink to use to create this link. You can use ListSinks to find the ARNs of sinks.
For more information about sinks, see CreateSink.
" + }, + "Tags": { + "shape": "TagMapInput", + "documentation": "Assigns one or more tags (key-value pairs) to the link.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.
" + } + } + }, + "CreateLinkOutput": { + "type": "structure", + "members": { + "Arn": { + "shape": "String", + "documentation": "The ARN of the link that is newly created.
" + }, + "Id": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the link ARN.
" + }, + "Label": { + "shape": "String", + "documentation": "The label that you assigned to this link. If the labelTemplate
includes variables, this field displays the variables resolved to their actual values.
The exact label template that you specified, with the variables not resolved.
" + }, + "ResourceTypes": { + "shape": "ResourceTypesOutput", + "documentation": "The resource types supported by this link.
" + }, + "SinkArn": { + "shape": "String", + "documentation": "The ARN of the sink that is used for this link.
" + }, + "Tags": { + "shape": "TagMapOutput", + "documentation": "The tags assigned to the link.
" + } + } + }, + "CreateSinkInput": { + "type": "structure", + "required": [ + "Name" + ], + "members": { + "Name": { + "shape": "SinkName", + "documentation": "A name for the sink.
" + }, + "Tags": { + "shape": "TagMapInput", + "documentation": "Assigns one or more tags (key-value pairs) to the link.
Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.
" + } + } + }, + "CreateSinkOutput": { + "type": "structure", + "members": { + "Arn": { + "shape": "String", + "documentation": "The ARN of the sink that is newly created.
" + }, + "Id": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the sink ARN.
" + }, + "Name": { + "shape": "String", + "documentation": "The name of the sink.
" + }, + "Tags": { + "shape": "TagMapOutput", + "documentation": "The tags assigned to the sink.
" + } + } + }, + "DeleteLinkInput": { + "type": "structure", + "required": [ + "Identifier" + ], + "members": { + "Identifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the link to delete.
" + } + } + }, + "DeleteLinkOutput": { + "type": "structure", + "members": {} + }, + "DeleteSinkInput": { + "type": "structure", + "required": [ + "Identifier" + ], + "members": { + "Identifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the sink to delete.
" + } + } + }, + "DeleteSinkOutput": { + "type": "structure", + "members": {} + }, + "GetLinkInput": { + "type": "structure", + "required": [ + "Identifier" + ], + "members": { + "Identifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the link to retrieve information for.
" + } + } + }, + "GetLinkOutput": { + "type": "structure", + "members": { + "Arn": { + "shape": "String", + "documentation": "The ARN of the link.
" + }, + "Id": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the link ARN.
" + }, + "Label": { + "shape": "String", + "documentation": "The label that you assigned to this link, with the variables resolved to their actual values.
" + }, + "LabelTemplate": { + "shape": "String", + "documentation": "The exact label template that was specified when the link was created, with the template variables not resolved.
" + }, + "ResourceTypes": { + "shape": "ResourceTypesOutput", + "documentation": "The resource types supported by this link.
" + }, + "SinkArn": { + "shape": "String", + "documentation": "The ARN of the sink that is used for this link.
" + }, + "Tags": { + "shape": "TagMapOutput", + "documentation": "The tags assigned to the link.
" + } + } + }, + "GetSinkInput": { + "type": "structure", + "required": [ + "Identifier" + ], + "members": { + "Identifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the sink to retrieve information for.
" + } + } + }, + "GetSinkOutput": { + "type": "structure", + "members": { + "Arn": { + "shape": "String", + "documentation": "The ARN of the sink.
" + }, + "Id": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the sink ARN.
" + }, + "Name": { + "shape": "String", + "documentation": "The name of the sink.
" + }, + "Tags": { + "shape": "TagMapOutput", + "documentation": "The tags assigned to the sink.
" + } + } + }, + "GetSinkPolicyInput": { + "type": "structure", + "required": [ + "SinkIdentifier" + ], + "members": { + "SinkIdentifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the sink to retrieve the policy of.
" + } + } + }, + "GetSinkPolicyOutput": { + "type": "structure", + "members": { + "SinkArn": { + "shape": "String", + "documentation": "The ARN of the sink.
" + }, + "SinkId": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the sink ARN.
" + }, + "Policy": { + "shape": "String", + "documentation": "The policy that you specified, in JSON format.
" + } + } + }, + "LabelTemplate": { + "type": "string", + "max": 64, + "min": 1 + }, + "ListAttachedLinksInput": { + "type": "structure", + "required": [ + "SinkIdentifier" + ], + "members": { + "MaxResults": { + "shape": "ListAttachedLinksMaxResults", + "documentation": "Limits the number of returned links to the specified number.
" + }, + "NextToken": { + "shape": "NextToken", + "documentation": "The token for the next set of items to return. You received this token from a previous call.
" + }, + "SinkIdentifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the sink that you want to retrieve links for.
" + } + } + }, + "ListAttachedLinksItem": { + "type": "structure", + "members": { + "Label": { + "shape": "String", + "documentation": "The label that was assigned to this link at creation, with the variables resolved to their actual values.
" + }, + "LinkArn": { + "shape": "String", + "documentation": "The ARN of the link.
" + }, + "ResourceTypes": { + "shape": "ResourceTypesOutput", + "documentation": "The resource types supported by this link.
" + } + }, + "documentation": "A structure that contains information about one link attached to this monitoring account sink.
" + }, + "ListAttachedLinksItems": { + "type": "list", + "member": { + "shape": "ListAttachedLinksItem" + } + }, + "ListAttachedLinksMaxResults": { + "type": "integer", + "box": true, + "max": 1000, + "min": 1 + }, + "ListAttachedLinksOutput": { + "type": "structure", + "required": [ + "Items" + ], + "members": { + "Items": { + "shape": "ListAttachedLinksItems", + "documentation": "An array of structures that contain the information about the attached links.
" + }, + "NextToken": { + "shape": "String", + "documentation": "The token to use when requesting the next set of links.
" + } + } + }, + "ListLinksInput": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "ListLinksMaxResults", + "documentation": "Limits the number of returned links to the specified number.
" + }, + "NextToken": { + "shape": "NextToken", + "documentation": "The token for the next set of items to return. You received this token from a previous call.
" + } + } + }, + "ListLinksItem": { + "type": "structure", + "members": { + "Arn": { + "shape": "String", + "documentation": "The ARN of the link.
" + }, + "Id": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the link ARN.
" + }, + "Label": { + "shape": "String", + "documentation": "The label that was assigned to this link at creation, with the variables resolved to their actual values.
" + }, + "ResourceTypes": { + "shape": "ResourceTypesOutput", + "documentation": "The resource types supported by this link.
" + }, + "SinkArn": { + "shape": "String", + "documentation": "The ARN of the sink that this link is attached to.
" + } + }, + "documentation": "A structure that contains information about one of this source account's links to a monitoring account.
" + }, + "ListLinksItems": { + "type": "list", + "member": { + "shape": "ListLinksItem" + } + }, + "ListLinksMaxResults": { + "type": "integer", + "box": true, + "max": 5, + "min": 1 + }, + "ListLinksOutput": { + "type": "structure", + "required": [ + "Items" + ], + "members": { + "Items": { + "shape": "ListLinksItems", + "documentation": "An array of structures that contain the information about the returned links.
" + }, + "NextToken": { + "shape": "String", + "documentation": "The token to use when requesting the next set of links.
" + } + } + }, + "ListSinksInput": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "ListSinksMaxResults", + "documentation": "Limits the number of returned links to the specified number.
" + }, + "NextToken": { + "shape": "NextToken", + "documentation": "The token for the next set of items to return. You received this token from a previous call.
" + } + } + }, + "ListSinksItem": { + "type": "structure", + "members": { + "Arn": { + "shape": "String", + "documentation": "The ARN of the sink.
" + }, + "Id": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the sink ARN.
" + }, + "Name": { + "shape": "String", + "documentation": "The name of the sink.
" + } + }, + "documentation": "A structure that contains information about one of this monitoring account's sinks.
" + }, + "ListSinksItems": { + "type": "list", + "member": { + "shape": "ListSinksItem" + } + }, + "ListSinksMaxResults": { + "type": "integer", + "box": true, + "max": 100, + "min": 1 + }, + "ListSinksOutput": { + "type": "structure", + "required": [ + "Items" + ], + "members": { + "Items": { + "shape": "ListSinksItems", + "documentation": "An array of structures that contain the information about the returned sinks.
" + }, + "NextToken": { + "shape": "String", + "documentation": "The token to use when requesting the next set of sinks.
" + } + } + }, + "ListTagsForResourceInput": { + "type": "structure", + "required": [ + "ResourceArn" + ], + "members": { + "ResourceArn": { + "shape": "Arn", + "documentation": "The ARN of the resource that you want to view tags for.
The ARN format of a sink is arn:aws:oam:Region:account-id:sink/sink-id
The ARN format of a link is arn:aws:oam:Region:account-id:link/link-id
For more information about ARN format, see CloudWatch Logs resources and operations.
Unlike tagging permissions in other Amazon Web Services services, to retrieve the list of tags for links or sinks you must have the oam:RequestTag
permission. The aws:ReguestTag
permission does not allow you to tag and untag links and sinks.
The list of tags associated with the requested resource.>
" + } + } + }, + "NextToken": { + "type": "string" + }, + "PutSinkPolicyInput": { + "type": "structure", + "required": [ + "SinkIdentifier", + "Policy" + ], + "members": { + "SinkIdentifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the sink to attach this policy to.
" + }, + "Policy": { + "shape": "SinkPolicy", + "documentation": "The JSON policy to use. If you are updating an existing policy, the entire existing policy is replaced by what you specify here.
The policy must be in JSON string format with quotation marks escaped and no newlines.
For examples of different types of policies, see the Examples section on this page.
" + } + } + }, + "PutSinkPolicyOutput": { + "type": "structure", + "members": { + "SinkArn": { + "shape": "String", + "documentation": "The ARN of the sink.
" + }, + "SinkId": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the sink ARN.
" + }, + "Policy": { + "shape": "String", + "documentation": "The policy that you specified.
" + } + } + }, + "ResourceIdentifier": { + "type": "string", + "pattern": "[a-zA-Z0-9][a-zA-Z0-9_:\\.\\-\\/]{0,2047}" + }, + "ResourceType": { + "type": "string", + "enum": [ + "AWS::CloudWatch::Metric", + "AWS::Logs::LogGroup", + "AWS::XRay::Trace" + ] + }, + "ResourceTypesInput": { + "type": "list", + "member": { + "shape": "ResourceType" + }, + "max": 50, + "min": 1 + }, + "ResourceTypesOutput": { + "type": "list", + "member": { + "shape": "String" + } + }, + "SinkName": { + "type": "string", + "pattern": "[a-zA-Z0-9_\\.\\-]{1,255}" + }, + "SinkPolicy": { + "type": "string" + }, + "String": { + "type": "string" + }, + "TagKey": { + "type": "string", + "max": 128, + "min": 1 + }, + "TagKeys": { + "type": "list", + "member": { + "shape": "TagKey" + } + }, + "TagMapInput": { + "type": "map", + "key": { + "shape": "TagKey" + }, + "value": { + "shape": "TagValue" + }, + "max": 50, + "min": 0 + }, + "TagMapOutput": { + "type": "map", + "key": { + "shape": "String" + }, + "value": { + "shape": "String" + } + }, + "TagResourceInput": { + "type": "structure", + "required": [ + "ResourceArn", + "Tags" + ], + "members": { + "ResourceArn": { + "shape": "Arn", + "documentation": "The ARN of the resource that you're adding tags to.
The ARN format of a sink is arn:aws:oam:Region:account-id:sink/sink-id
The ARN format of a link is arn:aws:oam:Region:account-id:link/link-id
For more information about ARN format, see CloudWatch Logs resources and operations.
", + "location": "uri", + "locationName": "ResourceArn" + }, + "Tags": { + "shape": "TagMapInput", + "documentation": "The list of key-value pairs to associate with the resource.
" + } + } + }, + "TagResourceOutput": { + "type": "structure", + "members": {} + }, + "TagValue": { + "type": "string", + "max": 256, + "min": 0 + }, + "UntagResourceInput": { + "type": "structure", + "required": [ + "ResourceArn", + "TagKeys" + ], + "members": { + "ResourceArn": { + "shape": "Arn", + "documentation": "The ARN of the resource that you're removing tags from.
The ARN format of a sink is arn:aws:oam:Region:account-id:sink/sink-id
The ARN format of a link is arn:aws:oam:Region:account-id:link/link-id
For more information about ARN format, see CloudWatch Logs resources and operations.
", + "location": "uri", + "locationName": "ResourceArn" + }, + "TagKeys": { + "shape": "TagKeys", + "documentation": "The list of tag keys to remove from the resource.
", + "location": "querystring", + "locationName": "tagKeys" + } + } + }, + "UntagResourceOutput": { + "type": "structure", + "members": {} + }, + "UpdateLinkInput": { + "type": "structure", + "required": [ + "Identifier", + "ResourceTypes" + ], + "members": { + "Identifier": { + "shape": "ResourceIdentifier", + "documentation": "The ARN of the link that you want to update.
" + }, + "ResourceTypes": { + "shape": "ResourceTypesInput", + "documentation": "An array of strings that define which types of data that the source account will send to the monitoring account.
Your input here replaces the current set of data types that are shared.
" + } + } + }, + "UpdateLinkOutput": { + "type": "structure", + "members": { + "Arn": { + "shape": "String", + "documentation": "The ARN of the link that you have updated.
" + }, + "Id": { + "shape": "String", + "documentation": "The random ID string that Amazon Web Services generated as part of the sink ARN.
" + }, + "Label": { + "shape": "String", + "documentation": "The label assigned to this link, with the variables resolved to their actual values.
" + }, + "LabelTemplate": { + "shape": "LabelTemplate", + "documentation": "The exact label template that was specified when the link was created, with the template variables not resolved.
" + }, + "ResourceTypes": { + "shape": "ResourceTypesOutput", + "documentation": "The resource types now supported by this link.
" + }, + "SinkArn": { + "shape": "String", + "documentation": "The ARN of the sink that is used for this link.
" + }, + "Tags": { + "shape": "TagMapOutput", + "documentation": "The tags assigned to the link.
" + } + } + } + }, + "documentation": "Use Amazon CloudWatch Observability Access Manager to create and manage links between source accounts and monitoring accounts by using CloudWatch cross-account observability. With CloudWatch cross-account observability, you can monitor and troubleshoot applications that span multiple accounts within a Region. Seamlessly search, visualize, and analyze your metrics, logs, and traces in any of the linked accounts without account boundaries.
Set up one or more Amazon Web Services accounts as monitoring accounts and link them with multiple source accounts. A monitoring account is a central Amazon Web Services account that can view and interact with observability data generated from source accounts. A source account is an individual Amazon Web Services account that generates observability data for the resources that reside in it. Source accounts share their observability data with the monitoring account. The shared observability data can include metrics in Amazon CloudWatch, logs in Amazon CloudWatch Logs, and traces in X-Ray.
"
+}
\ No newline at end of file
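Putting the new OAM operations together, a hedged end-to-end sketch with the AWS SDK for JavaScript v2. It assumes the service client added by this release is exposed as AWS.OAM, and the sink-policy statement (organization ID, condition keys) follows AWS's published cross-account observability examples rather than anything defined in this model, so treat it as illustrative:

```js
const AWS = require('aws-sdk');

const oam = new AWS.OAM({ region: 'us-east-1' }); // assumes the client is exposed as AWS.OAM

async function setUpMonitoringAccount() {
  // 1. Create the sink in the monitoring account (each account can contain one sink).
  const sink = await oam.createSink({ Name: 'central-observability-sink' }).promise();

  // 2. Attach a sink policy that lets an organization share metrics and log groups.
  //    o-exampleorgid is a placeholder; verify the condition keys against the
  //    published sink-policy examples before using this in practice.
  await oam.putSinkPolicy({
    SinkIdentifier: sink.Arn,
    Policy: JSON.stringify({
      Version: '2012-10-17',
      Statement: [{
        Effect: 'Allow',
        Principal: '*',
        Action: ['oam:CreateLink', 'oam:UpdateLink'],
        Resource: '*',
        Condition: {
          'ForAllValues:StringEquals': {
            'oam:ResourceTypes': ['AWS::CloudWatch::Metric', 'AWS::Logs::LogGroup']
          },
          StringEquals: { 'aws:PrincipalOrgID': 'o-exampleorgid' }
        }
      }]
    })
  }).promise();

  return sink.Arn;
}

// 3. In each *source* account, link to the sink created above.
async function linkSourceAccount(sinkArn) {
  const link = await oam.createLink({
    LabelTemplate: '$AccountName',
    ResourceTypes: ['AWS::CloudWatch::Metric', 'AWS::Logs::LogGroup'],
    SinkIdentifier: sinkArn
  }).promise();
  console.log('created link', link.Arn, 'with label', link.Label);
}
```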
diff --git a/apis/oam-2022-06-10.paginators.json b/apis/oam-2022-06-10.paginators.json
new file mode 100644
index 0000000000..3595f00983
--- /dev/null
+++ b/apis/oam-2022-06-10.paginators.json
@@ -0,0 +1,22 @@
+{
+ "pagination": {
+ "ListAttachedLinks": {
+ "input_token": "NextToken",
+ "output_token": "NextToken",
+ "limit_key": "MaxResults",
+ "result_key": "Items"
+ },
+ "ListLinks": {
+ "input_token": "NextToken",
+ "output_token": "NextToken",
+ "limit_key": "MaxResults",
+ "result_key": "Items"
+ },
+ "ListSinks": {
+ "input_token": "NextToken",
+ "output_token": "NextToken",
+ "limit_key": "MaxResults",
+ "result_key": "Items"
+ }
+ }
+}
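Because the paginator config above registers NextToken/MaxResults for the new list operations, the SDK v2 eachPage helper can drive them; a small sketch, again assuming the client is exposed as AWS.OAM:

```js
const AWS = require('aws-sdk');
const oam = new AWS.OAM({ region: 'us-east-1' }); // assumed client name

// eachPage follows NextToken automatically, per the paginator definition above.
oam.listSinks({ MaxResults: 100 }).eachPage((err, data) => {
  if (err) { console.error(err); return false; } // stop paginating on error
  if (!data) return true;                        // null data means no more pages
  data.Items.forEach(sink => console.log(sink.Name, sink.Arn));
  return true;                                   // request the next page
});
```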
diff --git a/apis/organizations-2016-11-28.min.json b/apis/organizations-2016-11-28.min.json
index a493f05b2f..4181083fa7 100644
--- a/apis/organizations-2016-11-28.min.json
+++ b/apis/organizations-2016-11-28.min.json
@@ -245,6 +245,7 @@
}
}
},
+ "DeleteResourcePolicy": {},
"DeregisterDelegatedAdministrator": {
"input": {
"type": "structure",
@@ -391,6 +392,16 @@
}
}
},
+ "DescribeResourcePolicy": {
+ "output": {
+ "type": "structure",
+ "members": {
+ "ResourcePolicy": {
+ "shape": "S2l"
+ }
+ }
+ }
+ },
"DetachPolicy": {
"input": {
"type": "structure",
@@ -431,7 +442,7 @@
"type": "structure",
"members": {
"Root": {
- "shape": "S2p"
+ "shape": "S2v"
}
}
}
@@ -477,7 +488,7 @@
"type": "structure",
"members": {
"Root": {
- "shape": "S2p"
+ "shape": "S2v"
}
}
}
@@ -554,7 +565,7 @@
"type": "structure",
"members": {
"Accounts": {
- "shape": "S38"
+ "shape": "S3e"
},
"NextToken": {}
}
@@ -578,7 +589,7 @@
"type": "structure",
"members": {
"Accounts": {
- "shape": "S38"
+ "shape": "S3e"
},
"NextToken": {}
}
@@ -724,7 +735,7 @@
"type": "structure",
"members": {
"Filter": {
- "shape": "S3u"
+ "shape": "S40"
},
"NextToken": {},
"MaxResults": {
@@ -736,7 +747,7 @@
"type": "structure",
"members": {
"Handshakes": {
- "shape": "S3w"
+ "shape": "S42"
},
"NextToken": {}
}
@@ -747,7 +758,7 @@
"type": "structure",
"members": {
"Filter": {
- "shape": "S3u"
+ "shape": "S40"
},
"NextToken": {},
"MaxResults": {
@@ -759,7 +770,7 @@
"type": "structure",
"members": {
"Handshakes": {
- "shape": "S3w"
+ "shape": "S42"
},
"NextToken": {}
}
@@ -841,7 +852,7 @@
"type": "structure",
"members": {
"Policies": {
- "shape": "S49"
+ "shape": "S4f"
},
"NextToken": {}
}
@@ -867,7 +878,7 @@
"type": "structure",
"members": {
"Policies": {
- "shape": "S49"
+ "shape": "S4f"
},
"NextToken": {}
}
@@ -889,7 +900,7 @@
"Roots": {
"type": "list",
"member": {
- "shape": "S2p"
+ "shape": "S2v"
}
},
"NextToken": {}
@@ -965,6 +976,28 @@
}
}
},
+ "PutResourcePolicy": {
+ "input": {
+ "type": "structure",
+ "required": [
+ "Content"
+ ],
+ "members": {
+ "Content": {},
+ "Tags": {
+ "shape": "St"
+ }
+ }
+ },
+ "output": {
+ "type": "structure",
+ "members": {
+ "ResourcePolicy": {
+ "shape": "S2l"
+ }
+ }
+ }
+ },
"RegisterDelegatedAdministrator": {
"input": {
"type": "structure",
@@ -1237,7 +1270,20 @@
"type": "string",
"sensitive": true
},
- "S2p": {
+ "S2l": {
+ "type": "structure",
+ "members": {
+ "ResourcePolicySummary": {
+ "type": "structure",
+ "members": {
+ "Id": {},
+ "Arn": {}
+ }
+ },
+ "Content": {}
+ }
+ },
+ "S2v": {
"type": "structure",
"members": {
"Id": {},
@@ -1248,26 +1294,26 @@
}
}
},
- "S38": {
+ "S3e": {
"type": "list",
"member": {
"shape": "S23"
}
},
- "S3u": {
+ "S40": {
"type": "structure",
"members": {
"ActionType": {},
"ParentHandshakeId": {}
}
},
- "S3w": {
+ "S42": {
"type": "list",
"member": {
"shape": "S4"
}
},
- "S49": {
+ "S4f": {
"type": "list",
"member": {
"shape": "S1s"
diff --git a/apis/organizations-2016-11-28.normal.json b/apis/organizations-2016-11-28.normal.json
index f360e557a1..301e674785 100644
--- a/apis/organizations-2016-11-28.normal.json
+++ b/apis/organizations-2016-11-28.normal.json
@@ -60,7 +60,7 @@
"shape": "AccessDeniedForDependencyException"
}
],
- "documentation": "Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.
This operation can be called only by the following principals when they also have the relevant IAM permissions:
Invitation to join or Approve all features request handshakes: only a principal from the member account.
The user who calls the API for an invitation to join must have the organizations:AcceptHandshake
permission. If you enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole
permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations
. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.
Enable all features final confirmation handshake: only a principal from the management account.
For more information about invitations, see Inviting an Amazon Web Services account to join your organization in the Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling all features in your organization in the Organizations User Guide.
After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.
" + "documentation": "Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.
This operation can be called only by the following principals when they also have the relevant IAM permissions:
Invitation to join or Approve all features request handshakes: only a principal from the member account.
The user who calls the API for an invitation to join must have the organizations:AcceptHandshake
permission. If you enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole
permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations
. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.
Enable all features final confirmation handshake: only a principal from the management account.
For more information about invitations, see Inviting an Amazon Web Services account to join your organization in the Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling all features in your organization in the Organizations User Guide.
After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.
" }, "AttachPolicy": { "name": "AttachPolicy", @@ -241,7 +241,7 @@ "shape": "UnsupportedAPIEndpointException" } ], - "documentation": "Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount
operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:
Use the Id
member of the CreateAccountStatus
response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.
Check the CloudTrail log for the CreateAccountResult
event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.
The user who calls the API to create an account must have the organizations:CreateAccount
permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations
. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.
If the request includes tags, then the requester must have the organizations:TagResource
permission.
Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole
by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.
This operation can be called only from the organization's management account.
For more information about creating accounts, see Creating an Amazon Web Services account in Your Organization in the Organizations User Guide.
When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the Organizations User Guide.
If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.
If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.
Using CreateAccount
to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an Amazon Web Services account in the Organizations User Guide.
When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.
Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount
operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:
Use the Id
value of the CreateAccountStatus
response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.
Check the CloudTrail log for the CreateAccountResult
event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.
The user who calls the API to create an account must have the organizations:CreateAccount
permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations
. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.
If the request includes tags, then the requester must have the organizations:TagResource
permission.
Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole
by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.
This operation can be called only from the organization's management account.
For more information about creating accounts, see Creating an Amazon Web Services account in Your Organization in the Organizations User Guide.
When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the Organizations User Guide.
If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.
If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.
Using CreateAccount
to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an Amazon Web Services account in the Organizations User Guide.
When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.
Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.
This operation can be called only from the organization's management account.
" }, + "DeleteResourcePolicy": { + "name": "DeleteResourcePolicy", + "http": { + "method": "POST", + "requestUri": "/" + }, + "errors": [ + { + "shape": "AccessDeniedException" + }, + { + "shape": "ServiceException" + }, + { + "shape": "UnsupportedAPIEndpointException" + }, + { + "shape": "TooManyRequestsException" + }, + { + "shape": "ConcurrentModificationException" + }, + { + "shape": "ConstraintViolationException" + }, + { + "shape": "AWSOrganizationsNotInUseException" + }, + { + "shape": "ResourcePolicyNotFoundException" + } + ], + "documentation": "Deletes the resource policy from your organization.
You can only call this operation from the organization's management account.
" + }, "DeregisterDelegatedAdministrator": { "name": "DeregisterDelegatedAdministrator", "http": { @@ -856,6 +890,40 @@ ], "documentation": "Retrieves information about a policy.
This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.
" }, + "DescribeResourcePolicy": { + "name": "DescribeResourcePolicy", + "http": { + "method": "POST", + "requestUri": "/" + }, + "output": { + "shape": "DescribeResourcePolicyResponse" + }, + "errors": [ + { + "shape": "AccessDeniedException" + }, + { + "shape": "ServiceException" + }, + { + "shape": "UnsupportedAPIEndpointException" + }, + { + "shape": "TooManyRequestsException" + }, + { + "shape": "AWSOrganizationsNotInUseException" + }, + { + "shape": "ResourcePolicyNotFoundException" + }, + { + "shape": "ConstraintViolationException" + } + ], + "documentation": "Retrieves information about a resource policy.
You can only call this operation from the organization's management account or by a member account that is a delegated administrator for an AWS service.
" + }, "DetachPolicy": { "name": "DetachPolicy", "http": { @@ -1026,7 +1094,7 @@ "shape": "UnsupportedAPIEndpointException" } ], - "documentation": "Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal
) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.
We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.
For more information about enabling services to integrate with Organizations, see Integrating Organizations with Other Amazon Web Services Services in the Organizations User Guide.
This operation can be called only from the organization's management account and only if the organization has enabled all features.
" + "documentation": "Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal
) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.
We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.
For more information about enabling services to integrate with Organizations, see Integrating Organizations with Other Amazon Web Services Services in the Organizations User Guide.
You can only call this operation from the organization's management account and only if the organization has enabled all features.
" }, "EnableAllFeatures": { "name": "EnableAllFeatures", @@ -1802,6 +1870,46 @@ ], "documentation": "Moves an account from its current source parent root or organizational unit (OU) to the specified destination parent root or OU.
This operation can be called only from the organization's management account.
" }, + "PutResourcePolicy": { + "name": "PutResourcePolicy", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "PutResourcePolicyRequest" + }, + "output": { + "shape": "PutResourcePolicyResponse" + }, + "errors": [ + { + "shape": "AccessDeniedException" + }, + { + "shape": "ServiceException" + }, + { + "shape": "UnsupportedAPIEndpointException" + }, + { + "shape": "TooManyRequestsException" + }, + { + "shape": "ConcurrentModificationException" + }, + { + "shape": "InvalidInputException" + }, + { + "shape": "ConstraintViolationException" + }, + { + "shape": "AWSOrganizationsNotInUseException" + } + ], + "documentation": "Creates or updates a resource policy.
You can only call this operation from the organization's management account.
" + }, "RegisterDelegatedAdministrator": { "name": "RegisterDelegatedAdministrator", "http": { @@ -2255,7 +2363,8 @@ "INVALID_IDENTITY_FOR_BUSINESS_VALIDATION", "UNKNOWN_BUSINESS_VALIDATION", "MISSING_PAYMENT_INSTRUMENT", - "INVALID_PAYMENT_INSTRUMENT" + "INVALID_PAYMENT_INSTRUMENT", + "UPDATE_EXISTING_RESOURCE_POLICY_WITH_TAGS_NOT_SUPPORTED" ] }, "CreateAccountName": { @@ -2282,7 +2391,7 @@ }, "RoleName": { "shape": "RoleName", - "documentation": "(Optional)
The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.
If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole
.
For more information about how to use this role to access the member account, see the following links:
Accessing and Administering the Member Accounts in Your Organization in the Organizations User Guide
Steps 2 and 3 in Tutorial: Delegate Access Across Amazon Web Services accounts Using IAM Roles in the IAM User Guide
The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-
" + "documentation": "The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.
If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole
.
For more information about how to use this role to access the member account, see the following links:
Accessing and Administering the Member Accounts in Your Organization in the Organizations User Guide
Steps 2 and 3 in Tutorial: Delegate Access Across Amazon Web Services accounts Using IAM Roles in the IAM User Guide
The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-
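For illustration only, a sketch of passing RoleName on CreateAccount with the AWS SDK for JavaScript (v2); the account name and email are placeholders, and the surrounding parameters are assumptions drawn from the existing CreateAccount request shape.

```js
const AWS = require('aws-sdk');

const organizations = new AWS.Organizations({ region: 'us-east-1' });

organizations.createAccount({
  AccountName: 'example-member-account',     // placeholder
  Email: 'owner@example.com',                // placeholder
  RoleName: 'OrganizationAccountAccessRole', // the documented default, shown explicitly
  IamUserAccessToBilling: 'ALLOW'
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.CreateAccountStatus); // account creation completes asynchronously
});
```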
" }, "IamUserAccessToBilling": { "shape": "IAMUserAccessToBilling", @@ -2756,6 +2865,15 @@ } } }, + "DescribeResourcePolicyResponse": { + "type": "structure", + "members": { + "ResourcePolicy": { + "shape": "ResourcePolicy", + "documentation": "A structure that contains details about the resource policy.
" + } + } + }, "DetachPolicyRequest": { "type": "structure", "required": [ @@ -3923,6 +4041,31 @@ "shape": "PolicyTypeSummary" } }, + "PutResourcePolicyRequest": { + "type": "structure", + "required": [ + "Content" + ], + "members": { + "Content": { + "shape": "ResourcePolicyContent", + "documentation": "If provided, the new content for the resource policy. The text must be correctly formatted JSON that complies with the syntax for the resource policy's type. For more information, see Service Control Policy Syntax in the Organizations User Guide.
" + }, + "Tags": { + "shape": "Tags", + "documentation": "Updates the list of tags that you want to attach to the newly-created resource policy. For each tag in the list, you must specify both a tag key and a value. You can set the value to an empty string, but you can't set it to null
. For more information about tagging, see Tagging Organizations resources in the Organizations User Guide.
Calls with tags apply to the initial creation of the resource policy; otherwise, an exception is thrown. If any one of the tags is invalid, or if you exceed the allowed number of tags for the resource policy, then the entire request fails and the resource policy is not created.
A structure that contains details about the resource policy.
" + } + } + }, "RegisterDelegatedAdministratorRequest": { "type": "structure", "required": [ @@ -3952,6 +4095,49 @@ } } }, + "ResourcePolicy": { + "type": "structure", + "members": { + "ResourcePolicySummary": { + "shape": "ResourcePolicySummary", + "documentation": "A structure that contains resource policy ID and Amazon Resource Name (ARN).
" + }, + "Content": { + "shape": "ResourcePolicyContent", + "documentation": "The policy text of the resource policy.
" + } + }, + "documentation": "A structure that contains details about a resource policy.
" + }, + "ResourcePolicyArn": { + "type": "string", + "pattern": "^arn:[a-z0-9][a-z0-9-.]{0,62}:organizations::\\d{12}:resourcepolicy\\/o-[a-z0-9]{10,32}\\/rp-[0-9a-zA-Z_]{4,128}" + }, + "ResourcePolicyContent": { + "type": "string", + "max": 40000, + "min": 1, + "pattern": "[\\s\\S]*" + }, + "ResourcePolicyId": { + "type": "string", + "max": 131, + "pattern": "^rp-[0-9a-zA-Z_]{4,128}$" + }, + "ResourcePolicySummary": { + "type": "structure", + "members": { + "Id": { + "shape": "ResourcePolicyId", + "documentation": "The unique identifier (ID) of the resource policy.
" + }, + "Arn": { + "shape": "ResourcePolicyArn", + "documentation": "The Amazon Resource Name (ARN) of the resource policy.
" + } + }, + "documentation": "A structure that contains resource policy ID and Amazon Resource Name (ARN).
" + }, "RoleName": { "type": "string", "max": 64, @@ -4061,7 +4247,7 @@ "TaggableResourceId": { "type": "string", "max": 130, - "pattern": "^(r-[0-9a-z]{4,32})|(\\d{12})|(ou-[0-9a-z]{4,32}-[a-z0-9]{8,32})|(^p-[0-9a-zA-Z_]{8,128})$" + "pattern": "^(r-[0-9a-z]{4,32})|(\\d{12})|(ou-[0-9a-z]{4,32}-[a-z0-9]{8,32})|(^p-[0-9a-zA-Z_]{8,128})|(^rp-[0-9a-zA-Z_]{4,128})$" }, "Tags": { "type": "list", diff --git a/apis/rds-2014-10-31.min.json b/apis/rds-2014-10-31.min.json index 402e8f2c0c..6497979565 100644 --- a/apis/rds-2014-10-31.min.json +++ b/apis/rds-2014-10-31.min.json @@ -311,6 +311,34 @@ } } }, + "CreateBlueGreenDeployment": { + "input": { + "type": "structure", + "required": [ + "BlueGreenDeploymentName", + "Source" + ], + "members": { + "BlueGreenDeploymentName": {}, + "Source": {}, + "TargetEngineVersion": {}, + "TargetDBParameterGroupName": {}, + "TargetDBClusterParameterGroupName": {}, + "Tags": { + "shape": "Sb" + } + } + }, + "output": { + "resultWrapper": "CreateBlueGreenDeploymentResult", + "type": "structure", + "members": { + "BlueGreenDeployment": { + "shape": "S1w" + } + } + } + }, "CreateCustomDBEngineVersion": { "input": { "type": "structure", @@ -335,7 +363,7 @@ } }, "output": { - "shape": "S1x", + "shape": "S2f", "resultWrapper": "CreateCustomDBEngineVersionResult" } }, @@ -358,7 +386,7 @@ "DBClusterIdentifier": {}, "DBClusterParameterGroupName": {}, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "DBSubnetGroupName": {}, "Engine": {}, @@ -387,11 +415,11 @@ "type": "long" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "EngineMode": {}, "ScalingConfiguration": { - "shape": "S2a" + "shape": "S2s" }, "DeletionProtection": { "type": "boolean" @@ -434,7 +462,7 @@ "type": "integer" }, "ServerlessV2ScalingConfiguration": { - "shape": "S2b" + "shape": "S2t" }, "NetworkType": {}, "DBSystemId": {}, @@ -446,7 +474,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -475,7 +503,7 @@ } }, "output": { - "shape": "S2w", + "shape": "S3e", "resultWrapper": "CreateDBClusterEndpointResult" } }, @@ -550,10 +578,10 @@ "MasterUsername": {}, "MasterUserPassword": {}, "DBSecurityGroups": { - "shape": "S32" + "shape": "S3k" }, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "AvailabilityZone": {}, "DBSubnetGroupName": {}, @@ -618,7 +646,7 @@ "type": "integer" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "ProcessorFeatures": { "shape": "S1c" @@ -645,7 +673,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -684,7 +712,7 @@ }, "DBSubnetGroupName": {}, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "StorageType": {}, "CopyTagsToSnapshot": { @@ -707,7 +735,7 @@ "type": "integer" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "ProcessorFeatures": { "shape": "S1c" @@ -737,7 +765,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -783,7 +811,7 @@ "DBProxyName": {}, "EngineFamily": {}, "Auth": { - "shape": "S3x" + "shape": "S4f" }, "RoleArn": {}, "VpcSubnetIds": { @@ -811,7 +839,7 @@ "type": "structure", "members": { "DBProxy": { - "shape": "S42" + "shape": "S4k" } } } @@ -844,7 +872,7 @@ "type": "structure", "members": { "DBProxyEndpoint": { - "shape": "S4b" + "shape": "S4t" } } } @@ -911,7 +939,7 @@ "DBSubnetGroupName": {}, "DBSubnetGroupDescription": {}, "SubnetIds": { - "shape": "S4i" + "shape": "S50" }, "Tags": { "shape": "Sb" @@ -923,7 +951,7 
@@ "type": "structure", "members": { "DBSubnetGroup": { - "shape": "S38" + "shape": "S3q" } } } @@ -985,7 +1013,7 @@ "type": "structure", "members": { "GlobalCluster": { - "shape": "S4o" + "shape": "S56" } } } @@ -1019,6 +1047,29 @@ } } }, + "DeleteBlueGreenDeployment": { + "input": { + "type": "structure", + "required": [ + "BlueGreenDeploymentIdentifier" + ], + "members": { + "BlueGreenDeploymentIdentifier": {}, + "DeleteTarget": { + "type": "boolean" + } + } + }, + "output": { + "resultWrapper": "DeleteBlueGreenDeploymentResult", + "type": "structure", + "members": { + "BlueGreenDeployment": { + "shape": "S1w" + } + } + } + }, "DeleteCustomDBEngineVersion": { "input": { "type": "structure", @@ -1032,7 +1083,7 @@ } }, "output": { - "shape": "S1x", + "shape": "S2f", "resultWrapper": "DeleteCustomDBEngineVersionResult" } }, @@ -1055,7 +1106,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -1071,7 +1122,7 @@ } }, "output": { - "shape": "S2w", + "shape": "S3e", "resultWrapper": "DeleteDBClusterEndpointResult" } }, @@ -1128,7 +1179,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -1146,7 +1197,7 @@ "type": "structure", "members": { "DBInstanceAutomatedBackup": { - "shape": "S57" + "shape": "S5r" } } } @@ -1177,7 +1228,7 @@ "type": "structure", "members": { "DBProxy": { - "shape": "S42" + "shape": "S4k" } } } @@ -1197,7 +1248,7 @@ "type": "structure", "members": { "DBProxyEndpoint": { - "shape": "S4b" + "shape": "S4t" } } } @@ -1279,7 +1330,7 @@ "type": "structure", "members": { "GlobalCluster": { - "shape": "S4o" + "shape": "S56" } } } @@ -1347,13 +1398,41 @@ } } }, + "DescribeBlueGreenDeployments": { + "input": { + "type": "structure", + "members": { + "BlueGreenDeploymentIdentifier": {}, + "Filters": { + "shape": "S6f" + }, + "Marker": {}, + "MaxRecords": { + "type": "integer" + } + } + }, + "output": { + "resultWrapper": "DescribeBlueGreenDeploymentsResult", + "type": "structure", + "members": { + "BlueGreenDeployments": { + "type": "list", + "member": { + "shape": "S1w" + } + }, + "Marker": {} + } + } + }, "DescribeCertificates": { "input": { "type": "structure", "members": { "CertificateIdentifier": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1368,7 +1447,7 @@ "Certificates": { "type": "list", "member": { - "shape": "S60", + "shape": "S6o", "locationName": "Certificate" } }, @@ -1386,7 +1465,7 @@ "DBClusterIdentifier": {}, "BacktrackIdentifier": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1416,7 +1495,7 @@ "DBClusterIdentifier": {}, "DBClusterEndpointIdentifier": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1432,7 +1511,7 @@ "DBClusterEndpoints": { "type": "list", "member": { - "shape": "S2w", + "shape": "S3e", "locationName": "DBClusterEndpointList" } } @@ -1445,7 +1524,7 @@ "members": { "DBClusterParameterGroupName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1478,7 +1557,7 @@ "DBClusterParameterGroupName": {}, "Source": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1491,7 +1570,7 @@ "type": "structure", "members": { "Parameters": { - "shape": "S6c" + "shape": "S70" }, "Marker": {} } @@ -1512,7 +1591,7 @@ "type": "structure", "members": { "DBClusterSnapshotAttributesResult": { - "shape": "S6h" + "shape": "S75" } } } @@ -1525,7 +1604,7 @@ 
"DBClusterSnapshotIdentifier": {}, "SnapshotType": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1560,7 +1639,7 @@ "members": { "DBClusterIdentifier": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1579,7 +1658,7 @@ "DBClusters": { "type": "list", "member": { - "shape": "S2e", + "shape": "S2w", "locationName": "DBCluster" } } @@ -1594,7 +1673,7 @@ "EngineVersion": {}, "DBParameterGroupFamily": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1622,7 +1701,7 @@ "DBEngineVersions": { "type": "list", "member": { - "shape": "S1x", + "shape": "S2f", "locationName": "DBEngineVersion" } } @@ -1636,7 +1715,7 @@ "DbiResourceId": {}, "DBInstanceIdentifier": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1653,7 +1732,7 @@ "DBInstanceAutomatedBackups": { "type": "list", "member": { - "shape": "S57", + "shape": "S5r", "locationName": "DBInstanceAutomatedBackup" } } @@ -1666,7 +1745,7 @@ "members": { "DBInstanceIdentifier": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1682,7 +1761,7 @@ "DBInstances": { "type": "list", "member": { - "shape": "S34", + "shape": "S3m", "locationName": "DBInstance" } } @@ -1705,7 +1784,7 @@ "type": "long" }, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1743,7 +1822,7 @@ "members": { "DBParameterGroupName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1776,7 +1855,7 @@ "DBParameterGroupName": {}, "Source": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1789,7 +1868,7 @@ "type": "structure", "members": { "Parameters": { - "shape": "S6c" + "shape": "S70" }, "Marker": {} } @@ -1801,7 +1880,7 @@ "members": { "DBProxyName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "Marker": {}, "MaxRecords": { @@ -1816,7 +1895,7 @@ "DBProxies": { "type": "list", "member": { - "shape": "S42" + "shape": "S4k" } }, "Marker": {} @@ -1830,7 +1909,7 @@ "DBProxyName": {}, "DBProxyEndpointName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "Marker": {}, "MaxRecords": { @@ -1845,7 +1924,7 @@ "DBProxyEndpoints": { "type": "list", "member": { - "shape": "S4b" + "shape": "S4t" } }, "Marker": {} @@ -1862,7 +1941,7 @@ "DBProxyName": {}, "TargetGroupName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "Marker": {}, "MaxRecords": { @@ -1877,7 +1956,7 @@ "TargetGroups": { "type": "list", "member": { - "shape": "S7j" + "shape": "S86" } }, "Marker": {} @@ -1894,7 +1973,7 @@ "DBProxyName": {}, "TargetGroupName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "Marker": {}, "MaxRecords": { @@ -1907,7 +1986,7 @@ "type": "structure", "members": { "Targets": { - "shape": "S7n" + "shape": "S8a" }, "Marker": {} } @@ -1919,7 +1998,7 @@ "members": { "DBSecurityGroupName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -1957,7 +2036,7 @@ "type": "structure", "members": { "DBSnapshotAttributesResult": { - "shape": "S7z" + "shape": "S8m" } } } @@ -1970,7 +2049,7 @@ "DBSnapshotIdentifier": {}, "SnapshotType": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2006,7 +2085,7 @@ "members": { "DBSubnetGroupName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2022,7 +2101,7 @@ "DBSubnetGroups": { "type": "list", 
"member": { - "shape": "S38", + "shape": "S3q", "locationName": "DBSubnetGroup" } } @@ -2038,7 +2117,7 @@ "members": { "DBParameterGroupFamily": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2051,7 +2130,7 @@ "type": "structure", "members": { "EngineDefaults": { - "shape": "S8a" + "shape": "S8x" } } } @@ -2065,7 +2144,7 @@ "members": { "DBParameterGroupFamily": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2078,7 +2157,7 @@ "type": "structure", "members": { "EngineDefaults": { - "shape": "S8a" + "shape": "S8x" } } } @@ -2089,7 +2168,7 @@ "members": { "SourceType": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" } } }, @@ -2120,7 +2199,7 @@ "members": { "SubscriptionName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2162,7 +2241,7 @@ "shape": "S8" }, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2204,7 +2283,7 @@ "ExportTaskIdentifier": {}, "SourceArn": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "Marker": {}, "MaxRecords": { @@ -2234,7 +2313,7 @@ "members": { "GlobalClusterIdentifier": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2250,7 +2329,7 @@ "GlobalClusters": { "type": "list", "member": { - "shape": "S4o", + "shape": "S56", "locationName": "GlobalClusterMember" } } @@ -2267,7 +2346,7 @@ "EngineName": {}, "MajorEngineVersion": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2380,7 +2459,7 @@ "members": { "OptionGroupName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "Marker": {}, "MaxRecords": { @@ -2421,7 +2500,7 @@ "type": "boolean" }, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2447,7 +2526,7 @@ "AvailabilityZones": { "type": "list", "member": { - "shape": "S3b", + "shape": "S3t", "locationName": "AvailabilityZone" } }, @@ -2495,10 +2574,10 @@ "type": "double" }, "AvailableProcessorFeatures": { - "shape": "S9f" + "shape": "Sa2" }, "SupportedEngineModes": { - "shape": "S22" + "shape": "S2k" }, "SupportsStorageAutoscaling": { "type": "boolean" @@ -2551,7 +2630,7 @@ "members": { "ResourceIdentifier": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "Marker": {}, "MaxRecords": { @@ -2589,7 +2668,7 @@ }, "LeaseId": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2605,7 +2684,7 @@ "ReservedDBInstances": { "type": "list", "member": { - "shape": "S9o", + "shape": "Sab", "locationName": "ReservedDBInstance" } } @@ -2625,7 +2704,7 @@ "type": "boolean" }, "Filters": { - "shape": "S5v" + "shape": "S6f" }, "MaxRecords": { "type": "integer" @@ -2662,7 +2741,7 @@ "type": "boolean" }, "RecurringCharges": { - "shape": "S9q" + "shape": "Sad" } }, "wrapper": true @@ -2681,7 +2760,7 @@ }, "Marker": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" } } }, @@ -2733,28 +2812,28 @@ "members": { "StorageType": {}, "StorageSize": { - "shape": "Sa5" + "shape": "Sas" }, "ProvisionedIops": { - "shape": "Sa5" + "shape": "Sas" }, "IopsToStorageRatio": { - "shape": "Sa7" + "shape": "Sau" }, "SupportsStorageAutoscaling": { "type": "boolean" }, "ProvisionedStorageThroughput": { - "shape": "Sa5" + "shape": "Sas" }, "StorageThroughputToIopsRatio": { - "shape": "Sa7" + "shape": "Sau" } } } }, "ValidProcessorFeatures": { - "shape": "S9f" + "shape": "Sa2" } }, "wrapper": true @@ -2806,7 +2885,7 @@ "type": "structure", "members": { 
"DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -2828,7 +2907,7 @@ "type": "structure", "members": { "GlobalCluster": { - "shape": "S4o" + "shape": "S56" } } } @@ -2842,7 +2921,7 @@ "members": { "ResourceName": {}, "Filters": { - "shape": "S5v" + "shape": "S6f" } } }, @@ -2894,7 +2973,7 @@ "type": "structure", "members": { "Certificate": { - "shape": "S60" + "shape": "S6o" } } } @@ -2949,7 +3028,7 @@ } }, "output": { - "shape": "S1x", + "shape": "S2f", "resultWrapper": "ModifyCustomDBEngineVersionResult" } }, @@ -2970,7 +3049,7 @@ }, "DBClusterParameterGroupName": {}, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "Port": { "type": "integer" @@ -2986,7 +3065,7 @@ "type": "long" }, "CloudwatchLogsExportConfiguration": { - "shape": "Sat" + "shape": "Sbg" }, "EngineVersion": {}, "AllowMajorVersionUpgrade": { @@ -2996,7 +3075,7 @@ "Domain": {}, "DomainIAMRoleName": {}, "ScalingConfiguration": { - "shape": "S2a" + "shape": "S2s" }, "DeletionProtection": { "type": "boolean" @@ -3033,7 +3112,7 @@ "type": "integer" }, "ServerlessV2ScalingConfiguration": { - "shape": "S2b" + "shape": "S2t" }, "NetworkType": {} } @@ -3043,7 +3122,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -3066,7 +3145,7 @@ } }, "output": { - "shape": "S2w", + "shape": "S3e", "resultWrapper": "ModifyDBClusterEndpointResult" } }, @@ -3080,12 +3159,12 @@ "members": { "DBClusterParameterGroupName": {}, "Parameters": { - "shape": "S6c" + "shape": "S70" } } }, "output": { - "shape": "Sax", + "shape": "Sbk", "resultWrapper": "ModifyDBClusterParameterGroupResult" } }, @@ -3100,10 +3179,10 @@ "DBClusterSnapshotIdentifier": {}, "AttributeName": {}, "ValuesToAdd": { - "shape": "S6k" + "shape": "S78" }, "ValuesToRemove": { - "shape": "S6k" + "shape": "S78" } } }, @@ -3112,7 +3191,7 @@ "type": "structure", "members": { "DBClusterSnapshotAttributesResult": { - "shape": "S6h" + "shape": "S75" } } } @@ -3131,10 +3210,10 @@ "DBInstanceClass": {}, "DBSubnetGroupName": {}, "DBSecurityGroups": { - "shape": "S32" + "shape": "S3k" }, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "ApplyImmediately": { "type": "boolean" @@ -3195,7 +3274,7 @@ "type": "integer" }, "CloudwatchLogsExportConfiguration": { - "shape": "Sat" + "shape": "Sbg" }, "ProcessorFeatures": { "shape": "S1c" @@ -3232,7 +3311,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -3247,12 +3326,12 @@ "members": { "DBParameterGroupName": {}, "Parameters": { - "shape": "S6c" + "shape": "S70" } } }, "output": { - "shape": "Sb4", + "shape": "Sbr", "resultWrapper": "ModifyDBParameterGroupResult" } }, @@ -3266,7 +3345,7 @@ "DBProxyName": {}, "NewDBProxyName": {}, "Auth": { - "shape": "S3x" + "shape": "S4f" }, "RequireTLS": { "type": "boolean" @@ -3288,7 +3367,7 @@ "type": "structure", "members": { "DBProxy": { - "shape": "S42" + "shape": "S4k" } } } @@ -3312,7 +3391,7 @@ "type": "structure", "members": { "DBProxyEndpoint": { - "shape": "S4b" + "shape": "S4t" } } } @@ -3353,7 +3432,7 @@ "type": "structure", "members": { "DBProxyTargetGroup": { - "shape": "S7j" + "shape": "S86" } } } @@ -3391,10 +3470,10 @@ "DBSnapshotIdentifier": {}, "AttributeName": {}, "ValuesToAdd": { - "shape": "S6k" + "shape": "S78" }, "ValuesToRemove": { - "shape": "S6k" + "shape": "S78" } } }, @@ -3403,7 +3482,7 @@ "type": "structure", "members": { "DBSnapshotAttributesResult": { - "shape": "S7z" + "shape": "S8m" } } } @@ -3419,7 +3498,7 @@ "DBSubnetGroupName": {}, 
"DBSubnetGroupDescription": {}, "SubnetIds": { - "shape": "S4i" + "shape": "S50" } } }, @@ -3428,7 +3507,7 @@ "type": "structure", "members": { "DBSubnetGroup": { - "shape": "S38" + "shape": "S3q" } } } @@ -3481,7 +3560,7 @@ "type": "structure", "members": { "GlobalCluster": { - "shape": "S4o" + "shape": "S56" } } } @@ -3509,10 +3588,10 @@ }, "OptionVersion": {}, "DBSecurityGroupMemberships": { - "shape": "S32" + "shape": "S3k" }, "VpcSecurityGroupMemberships": { - "shape": "S28" + "shape": "S2q" }, "OptionSettings": { "type": "list", @@ -3562,7 +3641,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -3582,7 +3661,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -3609,7 +3688,7 @@ "type": "structure", "members": { "ReservedDBInstance": { - "shape": "S9o" + "shape": "Sab" } } } @@ -3629,7 +3708,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -3652,7 +3731,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -3679,7 +3758,7 @@ "type": "structure", "members": { "DBProxyTargets": { - "shape": "S7n" + "shape": "S8a" } } } @@ -3697,7 +3776,7 @@ "type": "structure", "members": { "GlobalCluster": { - "shape": "S4o" + "shape": "S56" } } } @@ -3781,12 +3860,12 @@ "type": "boolean" }, "Parameters": { - "shape": "S6c" + "shape": "S70" } } }, "output": { - "shape": "Sax", + "shape": "Sbk", "resultWrapper": "ResetDBClusterParameterGroupResult" } }, @@ -3802,12 +3881,12 @@ "type": "boolean" }, "Parameters": { - "shape": "S6c" + "shape": "S70" } } }, "output": { - "shape": "Sb4", + "shape": "Sbr", "resultWrapper": "ResetDBParameterGroupResult" } }, @@ -3836,7 +3915,7 @@ "DBClusterIdentifier": {}, "DBClusterParameterGroupName": {}, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "DBSubnetGroupName": {}, "Engine": {}, @@ -3868,7 +3947,7 @@ "type": "long" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "DeletionProtection": { "type": "boolean" @@ -3879,7 +3958,7 @@ "Domain": {}, "DomainIAMRoleName": {}, "ServerlessV2ScalingConfiguration": { - "shape": "S2b" + "shape": "S2t" }, "NetworkType": {} } @@ -3889,7 +3968,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -3917,7 +3996,7 @@ "DatabaseName": {}, "OptionGroupName": {}, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "Tags": { "shape": "Sb" @@ -3930,11 +4009,11 @@ "type": "long" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "EngineMode": {}, "ScalingConfiguration": { - "shape": "S2a" + "shape": "S2s" }, "DBClusterParameterGroupName": {}, "DeletionProtection": { @@ -3954,7 +4033,7 @@ "type": "boolean" }, "ServerlessV2ScalingConfiguration": { - "shape": "S2b" + "shape": "S2t" }, "NetworkType": {} } @@ -3964,7 +4043,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -3992,7 +4071,7 @@ "DBSubnetGroupName": {}, "OptionGroupName": {}, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "Tags": { "shape": "Sb" @@ -4005,7 +4084,7 @@ "type": "long" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "DBClusterParameterGroupName": {}, "DeletionProtection": { @@ -4017,7 +4096,7 @@ "Domain": {}, "DomainIAMRoleName": {}, "ScalingConfiguration": { - "shape": "S2a" + "shape": "S2s" }, "EngineMode": {}, "DBClusterInstanceClass": {}, @@ -4029,7 +4108,7 @@ "type": "integer" }, 
"ServerlessV2ScalingConfiguration": { - "shape": "S2b" + "shape": "S2t" }, "NetworkType": {} } @@ -4039,7 +4118,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -4082,7 +4161,7 @@ "TdeCredentialArn": {}, "TdeCredentialPassword": {}, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "Domain": {}, "CopyTagsToSnapshot": { @@ -4093,7 +4172,7 @@ "type": "boolean" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "ProcessorFeatures": { "shape": "S1c" @@ -4122,7 +4201,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -4150,10 +4229,10 @@ "MasterUsername": {}, "MasterUserPassword": {}, "DBSecurityGroups": { - "shape": "S32" + "shape": "S3k" }, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "AvailabilityZone": {}, "DBSubnetGroupName": {}, @@ -4212,7 +4291,7 @@ "type": "integer" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "ProcessorFeatures": { "shape": "S1c" @@ -4237,7 +4316,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -4289,7 +4368,7 @@ "TdeCredentialArn": {}, "TdeCredentialPassword": {}, "VpcSecurityGroupIds": { - "shape": "S28" + "shape": "S2q" }, "Domain": {}, "DomainIAMRoleName": {}, @@ -4297,7 +4376,7 @@ "type": "boolean" }, "EnableCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "ProcessorFeatures": { "shape": "S1c" @@ -4330,7 +4409,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -4411,7 +4490,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -4431,7 +4510,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -4456,7 +4535,7 @@ "type": "structure", "members": { "DBInstanceAutomatedBackup": { - "shape": "S57" + "shape": "S5r" } } } @@ -4526,7 +4605,7 @@ "type": "structure", "members": { "DBCluster": { - "shape": "S2e" + "shape": "S2w" } } } @@ -4547,7 +4626,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -4567,7 +4646,30 @@ "type": "structure", "members": { "DBInstanceAutomatedBackup": { - "shape": "S57" + "shape": "S5r" + } + } + } + }, + "SwitchoverBlueGreenDeployment": { + "input": { + "type": "structure", + "required": [ + "BlueGreenDeploymentIdentifier" + ], + "members": { + "BlueGreenDeploymentIdentifier": {}, + "SwitchoverTimeout": { + "type": "integer" + } + } + }, + "output": { + "resultWrapper": "SwitchoverBlueGreenDeploymentResult", + "type": "structure", + "members": { + "BlueGreenDeployment": { + "shape": "S1w" } } } @@ -4587,7 +4689,7 @@ "type": "structure", "members": { "DBInstance": { - "shape": "S34" + "shape": "S3m" } } } @@ -4994,7 +5096,48 @@ } } }, - "S1x": { + "S1w": { + "type": "structure", + "members": { + "BlueGreenDeploymentIdentifier": {}, + "BlueGreenDeploymentName": {}, + "Source": {}, + "Target": {}, + "SwitchoverDetails": { + "type": "list", + "member": { + "type": "structure", + "members": { + "SourceMember": {}, + "TargetMember": {}, + "Status": {} + } + } + }, + "Tasks": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Name": {}, + "Status": {} + } + } + }, + "Status": {}, + "StatusDetails": {}, + "CreateTime": { + "type": "timestamp" + }, + "DeleteTime": { + "type": "timestamp" + }, + "TagList": { + "shape": "Sb" + } + } + }, + "S2f": { "type": "structure", "members": { "Engine": {}, @@ -5003,13 +5146,13 @@ 
"DBEngineDescription": {}, "DBEngineVersionDescription": {}, "DefaultCharacterSet": { - "shape": "S1y" + "shape": "S2g" }, "SupportedCharacterSets": { - "shape": "S1z" + "shape": "S2h" }, "SupportedNcharCharacterSets": { - "shape": "S1z" + "shape": "S2h" }, "ValidUpgradeTarget": { "type": "list", @@ -5027,7 +5170,7 @@ "type": "boolean" }, "SupportedEngineModes": { - "shape": "S22" + "shape": "S2k" }, "SupportsParallelQuery": { "type": "boolean" @@ -5052,7 +5195,7 @@ } }, "ExportableLogTypes": { - "shape": "S25" + "shape": "S2n" }, "SupportsLogExportsToCloudwatchLogs": { "type": "boolean" @@ -5061,7 +5204,7 @@ "type": "boolean" }, "SupportedEngineModes": { - "shape": "S22" + "shape": "S2k" }, "SupportedFeatureNames": { "type": "list", @@ -5091,35 +5234,35 @@ "CustomDBEngineVersionManifest": {} } }, - "S1y": { + "S2g": { "type": "structure", "members": { "CharacterSetName": {}, "CharacterSetDescription": {} } }, - "S1z": { + "S2h": { "type": "list", "member": { - "shape": "S1y", + "shape": "S2g", "locationName": "CharacterSet" } }, - "S22": { + "S2k": { "type": "list", "member": {} }, - "S25": { + "S2n": { "type": "list", "member": {} }, - "S28": { + "S2q": { "type": "list", "member": { "locationName": "VpcSecurityGroupId" } }, - "S2a": { + "S2s": { "type": "structure", "members": { "MinCapacity": { @@ -5140,7 +5283,7 @@ } } }, - "S2b": { + "S2t": { "type": "structure", "members": { "MinCapacity": { @@ -5151,7 +5294,7 @@ } } }, - "S2e": { + "S2w": { "type": "structure", "members": { "AllocatedStorage": { @@ -5270,7 +5413,7 @@ "type": "long" }, "EnabledCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "Capacity": { "type": "integer" @@ -5314,7 +5457,7 @@ "type": "boolean" }, "DomainMemberships": { - "shape": "S2p" + "shape": "S37" }, "TagList": { "shape": "Sb" @@ -5327,7 +5470,7 @@ "type": "structure", "members": { "PendingCloudwatchLogsExports": { - "shape": "S2t" + "shape": "S3b" }, "DBClusterIdentifier": {}, "MasterUserPassword": {}, @@ -5384,7 +5527,7 @@ }, "wrapper": true }, - "S2p": { + "S37": { "type": "list", "member": { "locationName": "DomainMembership", @@ -5397,18 +5540,18 @@ } } }, - "S2t": { + "S3b": { "type": "structure", "members": { "LogTypesToEnable": { - "shape": "S25" + "shape": "S2n" }, "LogTypesToDisable": { - "shape": "S25" + "shape": "S2n" } } }, - "S2w": { + "S3e": { "type": "structure", "members": { "DBClusterEndpointIdentifier": {}, @@ -5427,13 +5570,13 @@ "DBClusterEndpointArn": {} } }, - "S32": { + "S3k": { "type": "list", "member": { "locationName": "DBSecurityGroupName" } }, - "S34": { + "S3m": { "type": "structure", "members": { "DBInstanceIdentifier": {}, @@ -5446,7 +5589,7 @@ "MasterUsername": {}, "DBName": {}, "Endpoint": { - "shape": "S35" + "shape": "S3n" }, "AllocatedStorage": { "type": "integer" @@ -5477,7 +5620,7 @@ }, "AvailabilityZone": {}, "DBSubnetGroup": { - "shape": "S38" + "shape": "S3q" }, "PreferredMaintenanceWindow": {}, "PendingModifiedValues": { @@ -5507,7 +5650,7 @@ "CACertificateIdentifier": {}, "DBSubnetGroupName": {}, "PendingCloudwatchLogsExports": { - "shape": "S2t" + "shape": "S3b" }, "ProcessorFeatures": { "shape": "S1c" @@ -5597,7 +5740,7 @@ "DbiResourceId": {}, "CACertificateIdentifier": {}, "DomainMemberships": { - "shape": "S2p" + "shape": "S37" }, "CopyTagsToSnapshot": { "type": "boolean" @@ -5623,7 +5766,7 @@ "type": "integer" }, "EnabledCloudwatchLogsExports": { - "shape": "S25" + "shape": "S2n" }, "ProcessorFeatures": { "shape": "S1c" @@ -5644,7 +5787,7 @@ } }, "ListenerEndpoint": { - "shape": "S35" + "shape": 
"S3n" }, "MaxAllocatedStorage": { "type": "integer" @@ -5653,7 +5796,7 @@ "shape": "Sb" }, "DBInstanceAutomatedBackupsReplications": { - "shape": "S3o" + "shape": "S46" }, "CustomerOwnedIpEnabled": { "type": "boolean" @@ -5681,7 +5824,7 @@ }, "wrapper": true }, - "S35": { + "S3n": { "type": "structure", "members": { "Address": {}, @@ -5691,7 +5834,7 @@ "HostedZoneId": {} } }, - "S38": { + "S3q": { "type": "structure", "members": { "DBSubnetGroupName": {}, @@ -5706,7 +5849,7 @@ "members": { "SubnetIdentifier": {}, "SubnetAvailabilityZone": { - "shape": "S3b" + "shape": "S3t" }, "SubnetOutpost": { "type": "structure", @@ -5725,14 +5868,14 @@ }, "wrapper": true }, - "S3b": { + "S3t": { "type": "structure", "members": { "Name": {} }, "wrapper": true }, - "S3o": { + "S46": { "type": "list", "member": { "locationName": "DBInstanceAutomatedBackupsReplication", @@ -5742,7 +5885,7 @@ } } }, - "S3x": { + "S4f": { "type": "list", "member": { "type": "structure", @@ -5755,7 +5898,7 @@ } } }, - "S42": { + "S4k": { "type": "structure", "members": { "DBProxyName": {}, @@ -5801,7 +5944,7 @@ } } }, - "S4b": { + "S4t": { "type": "structure", "members": { "DBProxyEndpointName": {}, @@ -5825,13 +5968,13 @@ } } }, - "S4i": { + "S50": { "type": "list", "member": { "locationName": "SubnetIdentifier" } }, - "S4o": { + "S56": { "type": "structure", "members": { "GlobalClusterIdentifier": {}, @@ -5878,7 +6021,7 @@ }, "wrapper": true }, - "S57": { + "S5r": { "type": "structure", "members": { "DBInstanceArn": {}, @@ -5931,7 +6074,7 @@ }, "DBInstanceAutomatedBackupsArn": {}, "DBInstanceAutomatedBackupsReplications": { - "shape": "S3o" + "shape": "S46" }, "BackupTarget": {}, "StorageThroughput": { @@ -5940,7 +6083,7 @@ }, "wrapper": true }, - "S5v": { + "S6f": { "type": "list", "member": { "locationName": "Filter", @@ -5960,7 +6103,7 @@ } } }, - "S60": { + "S6o": { "type": "structure", "members": { "CertificateIdentifier": {}, @@ -5982,7 +6125,7 @@ }, "wrapper": true }, - "S6c": { + "S70": { "type": "list", "member": { "locationName": "Parameter", @@ -6001,12 +6144,12 @@ "MinimumEngineVersion": {}, "ApplyMethod": {}, "SupportedEngineModes": { - "shape": "S22" + "shape": "S2k" } } } }, - "S6h": { + "S75": { "type": "structure", "members": { "DBClusterSnapshotIdentifier": {}, @@ -6018,7 +6161,7 @@ "members": { "AttributeName": {}, "AttributeValues": { - "shape": "S6k" + "shape": "S78" } } } @@ -6026,13 +6169,13 @@ }, "wrapper": true }, - "S6k": { + "S78": { "type": "list", "member": { "locationName": "AttributeValue" } }, - "S7j": { + "S86": { "type": "structure", "members": { "DBProxyName": {}, @@ -6068,7 +6211,7 @@ } } }, - "S7n": { + "S8a": { "type": "list", "member": { "type": "structure", @@ -6093,7 +6236,7 @@ } } }, - "S7z": { + "S8m": { "type": "structure", "members": { "DBSnapshotIdentifier": {}, @@ -6105,7 +6248,7 @@ "members": { "AttributeName": {}, "AttributeValues": { - "shape": "S6k" + "shape": "S78" } }, "wrapper": true @@ -6114,18 +6257,18 @@ }, "wrapper": true }, - "S8a": { + "S8x": { "type": "structure", "members": { "DBParameterGroupFamily": {}, "Marker": {}, "Parameters": { - "shape": "S6c" + "shape": "S70" } }, "wrapper": true }, - "S9f": { + "Sa2": { "type": "list", "member": { "locationName": "AvailableProcessorFeature", @@ -6137,7 +6280,7 @@ } } }, - "S9o": { + "Sab": { "type": "structure", "members": { "ReservedDBInstanceId": {}, @@ -6166,14 +6309,14 @@ }, "State": {}, "RecurringCharges": { - "shape": "S9q" + "shape": "Sad" }, "ReservedDBInstanceArn": {}, "LeaseId": {} }, "wrapper": true }, - 
"S9q": { + "Sad": { "type": "list", "member": { "locationName": "RecurringCharge", @@ -6187,7 +6330,7 @@ "wrapper": true } }, - "Sa5": { + "Sas": { "type": "list", "member": { "locationName": "Range", @@ -6205,7 +6348,7 @@ } } }, - "Sa7": { + "Sau": { "type": "list", "member": { "locationName": "DoubleRange", @@ -6220,24 +6363,24 @@ } } }, - "Sat": { + "Sbg": { "type": "structure", "members": { "EnableLogTypes": { - "shape": "S25" + "shape": "S2n" }, "DisableLogTypes": { - "shape": "S25" + "shape": "S2n" } } }, - "Sax": { + "Sbk": { "type": "structure", "members": { "DBClusterParameterGroupName": {} } }, - "Sb4": { + "Sbr": { "type": "structure", "members": { "DBParameterGroupName": {} diff --git a/apis/rds-2014-10-31.normal.json b/apis/rds-2014-10-31.normal.json index 2e2ebd3539..6e139f35de 100644 --- a/apis/rds-2014-10-31.normal.json +++ b/apis/rds-2014-10-31.normal.json @@ -362,6 +362,56 @@ ], "documentation": "Copies the specified option group.
" }, + "CreateBlueGreenDeployment": { + "name": "CreateBlueGreenDeployment", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "CreateBlueGreenDeploymentRequest" + }, + "output": { + "shape": "CreateBlueGreenDeploymentResponse", + "resultWrapper": "CreateBlueGreenDeploymentResult" + }, + "errors": [ + { + "shape": "DBInstanceNotFoundFault" + }, + { + "shape": "DBClusterNotFoundFault" + }, + { + "shape": "SourceDatabaseNotSupportedFault" + }, + { + "shape": "SourceClusterNotSupportedFault" + }, + { + "shape": "BlueGreenDeploymentAlreadyExistsFault" + }, + { + "shape": "DBParameterGroupNotFoundFault" + }, + { + "shape": "DBClusterParameterGroupNotFoundFault" + }, + { + "shape": "InstanceQuotaExceededFault" + }, + { + "shape": "DBClusterQuotaExceededFault" + }, + { + "shape": "InvalidDBInstanceStateFault" + }, + { + "shape": "InvalidDBClusterStateFault" + } + ], + "documentation": "Creates a blue/green deployment.
A blue/green deployment creates a staging environment that copies the production environment. In a blue/green deployment, the blue environment is the current production environment. The green environment is the staging environment. The staging environment stays in sync with the current production environment using logical replication.
You can make changes to the databases in the green environment without affecting production workloads. For example, you can upgrade the major or minor DB engine version, change database parameters, or make schema changes in the staging environment. You can thoroughly test changes in the green environment. When ready, you can switch over the environments to promote the green environment to be the new production environment. The switchover typically takes under a minute.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
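A sketch of creating a blue/green deployment with the AWS SDK for JavaScript (v2), using the request members introduced here; the deployment name, source ARN, engine version, and parameter group are placeholders.

```js
const AWS = require('aws-sdk');

const rds = new AWS.RDS({ region: 'us-east-1' });

rds.createBlueGreenDeployment({
  BlueGreenDeploymentName: 'my-upgrade-test',                        // placeholder
  Source: 'arn:aws:rds:us-east-1:123456789012:db:my-production-db',  // placeholder ARN
  TargetEngineVersion: '8.0.31',                                     // version to run in the green environment
  TargetDBParameterGroupName: 'my-green-parameter-group',            // optional, for testing parameter changes
  Tags: [{ Key: 'purpose', Value: 'engine-upgrade' }]
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.BlueGreenDeployment.Status); // typically PROVISIONING at first
});
```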
" + }, "CreateCustomDBEngineVersion": { "name": "CreateCustomDBEngineVersion", "http": { @@ -961,6 +1011,29 @@ ], "documentation": "Creates a new option group. You can create up to 20 option groups.
This command doesn't apply to RDS Custom.
" }, + "DeleteBlueGreenDeployment": { + "name": "DeleteBlueGreenDeployment", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "DeleteBlueGreenDeploymentRequest" + }, + "output": { + "shape": "DeleteBlueGreenDeploymentResponse", + "resultWrapper": "DeleteBlueGreenDeploymentResult" + }, + "errors": [ + { + "shape": "BlueGreenDeploymentNotFoundFault" + }, + { + "shape": "InvalidBlueGreenDeploymentStateFault" + } + ], + "documentation": "Deletes a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
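A sketch of the corresponding delete call with the AWS SDK for JavaScript (v2); the identifier is a placeholder, and DeleteTarget is included only to illustrate the flag added in this model.

```js
const AWS = require('aws-sdk');

const rds = new AWS.RDS({ region: 'us-east-1' });

rds.deleteBlueGreenDeployment({
  BlueGreenDeploymentIdentifier: 'bgd-0123456789abcdef', // placeholder
  DeleteTarget: true // also remove the resources created in the green environment
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.BlueGreenDeployment.Status); // e.g. DELETING
});
```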
" + }, "DeleteCustomDBEngineVersion": { "name": "DeleteCustomDBEngineVersion", "http": { @@ -1380,6 +1453,26 @@ }, "documentation": "Lists all of the attributes for a customer account. The attributes include Amazon RDS quotas for the account, such as the number of DB instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.
This command doesn't take any parameters.
" }, + "DescribeBlueGreenDeployments": { + "name": "DescribeBlueGreenDeployments", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "DescribeBlueGreenDeploymentsRequest" + }, + "output": { + "shape": "DescribeBlueGreenDeploymentsResponse", + "resultWrapper": "DescribeBlueGreenDeploymentsResult" + }, + "errors": [ + { + "shape": "BlueGreenDeploymentNotFoundFault" + } + ], + "documentation": "Returns information about blue/green deployments.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" + }, "DescribeCertificates": { "name": "DescribeCertificates", "http": { @@ -3894,6 +3987,29 @@ ], "documentation": "Stops automated backup replication for a DB instance.
This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.
For more information, see Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.
" }, + "SwitchoverBlueGreenDeployment": { + "name": "SwitchoverBlueGreenDeployment", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "SwitchoverBlueGreenDeploymentRequest" + }, + "output": { + "shape": "SwitchoverBlueGreenDeploymentResponse", + "resultWrapper": "SwitchoverBlueGreenDeploymentResult" + }, + "errors": [ + { + "shape": "BlueGreenDeploymentNotFoundFault" + }, + { + "shape": "InvalidBlueGreenDeploymentStateFault" + } + ], + "documentation": "Switches over a blue/green deployment.
Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
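A sketch of triggering the switchover with the AWS SDK for JavaScript (v2); the identifier is a placeholder and the timeout simply restates the documented default.

```js
const AWS = require('aws-sdk');

const rds = new AWS.RDS({ region: 'us-east-1' });

rds.switchoverBlueGreenDeployment({
  BlueGreenDeploymentIdentifier: 'bgd-0123456789abcdef', // placeholder
  SwitchoverTimeout: 300 // seconds; changes roll back if the switchover takes longer
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.BlueGreenDeployment.Status); // e.g. SWITCHOVER_IN_PROGRESS
});
```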
" + }, "SwitchoverReadReplica": { "name": "SwitchoverReadReplica", "http": { @@ -4257,6 +4373,105 @@ }, "documentation": "" }, + "BlueGreenDeployment": { + "type": "structure", + "members": { + "BlueGreenDeploymentIdentifier": { + "shape": "BlueGreenDeploymentIdentifier", + "documentation": "The system-generated identifier of the blue/green deployment.
" + }, + "BlueGreenDeploymentName": { + "shape": "BlueGreenDeploymentName", + "documentation": "The user-supplied name of the blue/green deployment.
" + }, + "Source": { + "shape": "DatabaseArn", + "documentation": "The source database for the blue/green deployment.
Before switchover, the source database is the production database in the blue environment.
" + }, + "Target": { + "shape": "DatabaseArn", + "documentation": "The target database for the blue/green deployment.
Before switchover, the target database is the clone database in the green environment.
" + }, + "SwitchoverDetails": { + "shape": "SwitchoverDetailList", + "documentation": "The details about each source and target resource in the blue/green deployment.
" + }, + "Tasks": { + "shape": "BlueGreenDeploymentTaskList", + "documentation": "Either tasks to be performed or tasks that have been completed on the target database before switchover.
" + }, + "Status": { + "shape": "BlueGreenDeploymentStatus", + "documentation": "The status of the blue/green deployment.
Values:
PROVISIONING
- Resources are being created in the green environment.
AVAILABLE
- Resources are available in the green environment.
SWITCHOVER_IN_PROGRESS
- The deployment is being switched from the blue environment to the green environment.
SWITCHOVER_COMPLETED
- Switchover from the blue environment to the green environment is complete.
INVALID_CONFIGURATION
- Resources in the green environment are invalid, so switchover isn't possible.
SWITCHOVER_FAILED
- Switchover was attempted but failed.
DELETING
- The blue/green deployment is being deleted.
Additional information about the status of the blue/green deployment.
" + }, + "CreateTime": { + "shape": "TStamp", + "documentation": "Specifies the time when the blue/green deployment was created, in Universal Coordinated Time (UTC).
" + }, + "DeleteTime": { + "shape": "TStamp", + "documentation": "Specifies the time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).
" + }, + "TagList": { + "shape": "TagList" + } + }, + "documentation": "Contains the details about a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
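One way the status values above might be consumed is a simple poll until the green environment is ready; a rough sketch (AWS SDK for JavaScript v2, promise style), assuming a single deployment matches the identifier and that a 30-second poll interval is acceptable.

```js
const AWS = require('aws-sdk');

const rds = new AWS.RDS({ region: 'us-east-1' });

// Poll DescribeBlueGreenDeployments until the deployment is AVAILABLE,
// failing fast on the terminal error statuses documented above.
async function waitUntilAvailable(id) {
  for (;;) {
    const { BlueGreenDeployments } = await rds
      .describeBlueGreenDeployments({ BlueGreenDeploymentIdentifier: id })
      .promise();
    const status = BlueGreenDeployments[0].Status;
    if (status === 'AVAILABLE') return;
    if (status === 'INVALID_CONFIGURATION' || status === 'SWITCHOVER_FAILED') {
      throw new Error(`blue/green deployment entered status ${status}`);
    }
    await new Promise((resolve) => setTimeout(resolve, 30000)); // wait 30s between polls
  }
}
```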
" + }, + "BlueGreenDeploymentIdentifier": { + "type": "string", + "max": 255, + "min": 1, + "pattern": "[A-Za-z][0-9A-Za-z-:._]*" + }, + "BlueGreenDeploymentList": { + "type": "list", + "member": { + "shape": "BlueGreenDeployment" + } + }, + "BlueGreenDeploymentName": { + "type": "string", + "max": 60, + "min": 1, + "pattern": "[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*" + }, + "BlueGreenDeploymentStatus": { + "type": "string" + }, + "BlueGreenDeploymentStatusDetails": { + "type": "string" + }, + "BlueGreenDeploymentTask": { + "type": "structure", + "members": { + "Name": { + "shape": "BlueGreenDeploymentTaskName", + "documentation": "The name of the blue/green deployment task.
" + }, + "Status": { + "shape": "BlueGreenDeploymentTaskStatus", + "documentation": "The status of the blue/green deployment task.
Values:
PENDING
- The resources are being prepared for deployment.
IN_PROGRESS
- The resource is being deployed.
COMPLETED
- The resource has been deployed.
FAILED
- Deployment of the resource failed.
Contains the details about a task for a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" + }, + "BlueGreenDeploymentTaskList": { + "type": "list", + "member": { + "shape": "BlueGreenDeploymentTask" + } + }, + "BlueGreenDeploymentTaskName": { + "type": "string" + }, + "BlueGreenDeploymentTaskStatus": { + "type": "string" + }, "Boolean": { "type": "boolean" }, @@ -4657,6 +4872,47 @@ } } }, + "CreateBlueGreenDeploymentRequest": { + "type": "structure", + "required": [ + "BlueGreenDeploymentName", + "Source" + ], + "members": { + "BlueGreenDeploymentName": { + "shape": "BlueGreenDeploymentName", + "documentation": "The name of the blue/green deployment.
Constraints:
Can't be the same as an existing blue/green deployment name in the same account and Amazon Web Services Region.
The Amazon Resource Name (ARN) of the source production database.
Specify the database that you want to clone. The blue/green deployment creates this database in the green environment. You can make updates to the database in the green environment, such as an engine version upgrade. When you are ready, you can switch the database in the green environment to be the production database.
" + }, + "TargetEngineVersion": { + "shape": "TargetEngineVersion", + "documentation": "The engine version of the database in the green environment.
Specify the engine version to upgrade to in the green environment.
" + }, + "TargetDBParameterGroupName": { + "shape": "TargetDBParameterGroupName", + "documentation": "The DB parameter group associated with the DB instance in the green environment.
To test parameter changes, specify a DB parameter group that is different from the one associated with the source DB instance.
" + }, + "TargetDBClusterParameterGroupName": { + "shape": "TargetDBClusterParameterGroupName", + "documentation": "The DB cluster parameter group associated with the Aurora DB cluster in the green environment.
To test parameter changes, specify a DB cluster parameter group that is different from the one associated with the source DB cluster.
" + }, + "Tags": { + "shape": "TagList", + "documentation": "Tags to assign to the blue/green deployment.
" + } + } + }, + "CreateBlueGreenDeploymentResponse": { + "type": "structure", + "members": { + "BlueGreenDeployment": { + "shape": "BlueGreenDeployment" + } + } + }, "CreateCustomDBEngineVersionMessage": { "type": "structure", "required": [ @@ -4673,7 +4929,7 @@ }, "EngineVersion": { "shape": "CustomEngineVersion", - "documentation": "The name of your CEV. The name format is 19.customized_string
. For example, a valid name is 19.my_cev1
. This setting is required for RDS Custom for Oracle, but optional for Amazon RDS. The combination of Engine
and EngineVersion
is unique per customer per Region.
The name of your CEV. The name format is 19.customized_string. For example, a valid CEV name is 19.my_cev1
. This setting is required for RDS Custom for Oracle, but optional for Amazon RDS. The combination of Engine
and EngineVersion
is unique per customer per Region.
The version number of the database engine to use.
For a list of valid engine versions, use the DescribeDBEngineVersions
operation.
The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
Amazon Aurora
Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.
Amazon RDS Custom for Oracle
A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string
. An example identifier is 19.my_cev1
. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.
Amazon RDS Custom for SQL Server
See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.
MariaDB
For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.
Microsoft SQL Server
For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.
MySQL
For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.
Oracle
For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.
PostgreSQL
For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.
" + "documentation": "The version number of the database engine to use.
For a list of valid engine versions, use the DescribeDBEngineVersions
operation.
The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
Amazon Aurora
Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.
Amazon RDS Custom for Oracle
A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1
. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.
Amazon RDS Custom for SQL Server
See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.
MariaDB
For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.
Microsoft SQL Server
For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.
MySQL
For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.
Oracle
For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.
PostgreSQL
For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.
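As a small illustration of the DescribeDBEngineVersions lookup mentioned above (AWS SDK for JavaScript v2), listing the versions for one engine; the engine name is just an example.

```js
const AWS = require('aws-sdk');

const rds = new AWS.RDS({ region: 'us-east-1' });

// List the engine versions that can be passed as EngineVersion for MySQL.
rds.describeDBEngineVersions({ Engine: 'mysql' }, (err, data) => {
  if (err) return console.error(err);
  data.DBEngineVersions.forEach((v) => console.log(v.EngineVersion));
});
```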
" }, "AutoMinorVersionUpgrade": { "shape": "BooleanOptional", @@ -7888,6 +8144,36 @@ "locationName": "DBSubnetGroup" } }, + "DatabaseArn": { + "type": "string", + "max": 2048, + "min": 1, + "pattern": "^arn:[A-Za-z][0-9A-Za-z-:._]*" + }, + "DeleteBlueGreenDeploymentRequest": { + "type": "structure", + "required": [ + "BlueGreenDeploymentIdentifier" + ], + "members": { + "BlueGreenDeploymentIdentifier": { + "shape": "BlueGreenDeploymentIdentifier", + "documentation": "The blue/green deployment identifier of the deployment to be deleted. This parameter isn't case-sensitive.
Constraints:
Must match an existing blue/green deployment identifier.
A value that indicates whether to delete the resources in the green environment.
" + } + } + }, + "DeleteBlueGreenDeploymentResponse": { + "type": "structure", + "members": { + "BlueGreenDeployment": { + "shape": "BlueGreenDeployment" + } + } + }, "DeleteCustomDBEngineVersionMessage": { "type": "structure", "required": [ @@ -8224,6 +8510,40 @@ "members": {}, "documentation": "" }, + "DescribeBlueGreenDeploymentsRequest": { + "type": "structure", + "members": { + "BlueGreenDeploymentIdentifier": { + "shape": "BlueGreenDeploymentIdentifier", + "documentation": "The blue/green deployment identifier. If this parameter is specified, information from only the specific blue/green deployment is returned. This parameter isn't case-sensitive.
Constraints:
If supplied, must match an existing blue/green deployment identifier.
A filter that specifies one or more blue/green deployments to describe.
Supported filters:
blue-green-deployment-identifier
- Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers.
blue-green-deployment-name
- Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names.
source
- Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases.
target
- Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.
An optional pagination token provided by a previous DescribeBlueGreenDeployments
request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords
.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords
value, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
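A sketch of combining the blue-green-deployment-name filter with Marker-based pagination (AWS SDK for JavaScript v2, promise style); the deployment name is a placeholder.

```js
const AWS = require('aws-sdk');

const rds = new AWS.RDS({ region: 'us-east-1' });

// Collect every page of results for deployments with a given name.
async function listDeploymentsByName(name) {
  const deployments = [];
  let Marker;
  do {
    const page = await rds.describeBlueGreenDeployments({
      Filters: [{ Name: 'blue-green-deployment-name', Values: [name] }],
      MaxRecords: 20, // minimum allowed page size per the constraints above
      Marker
    }).promise();
    deployments.push(...page.BlueGreenDeployments);
    Marker = page.Marker;
  } while (Marker);
  return deployments;
}
```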
" + } + } + }, + "DescribeBlueGreenDeploymentsResponse": { + "type": "structure", + "members": { + "BlueGreenDeployments": { + "shape": "BlueGreenDeploymentList", + "documentation": "Contains a list of blue/green deployments for the user.
" + }, + "Marker": { + "shape": "String", + "documentation": "A pagination token that can be used in a later DescribeBlueGreenDeployments request.
" + } + } + }, "DescribeCertificatesMessage": { "type": "structure", "members": { @@ -13871,6 +14191,57 @@ "locationName": "Timezone" } }, + "SwitchoverBlueGreenDeploymentRequest": { + "type": "structure", + "required": [ + "BlueGreenDeploymentIdentifier" + ], + "members": { + "BlueGreenDeploymentIdentifier": { + "shape": "BlueGreenDeploymentIdentifier", + "documentation": "The blue/green deployment identifier.
Constraints:
Must match an existing blue/green deployment identifier.
The amount of time, in seconds, for the switchover to complete. The default is 300.
If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.
" + } + } + }, + "SwitchoverBlueGreenDeploymentResponse": { + "type": "structure", + "members": { + "BlueGreenDeployment": { + "shape": "BlueGreenDeployment" + } + } + }, + "SwitchoverDetail": { + "type": "structure", + "members": { + "SourceMember": { + "shape": "DatabaseArn", + "documentation": "The Amazon Resource Name (ARN) of a resource in the blue environment.
" + }, + "TargetMember": { + "shape": "DatabaseArn", + "documentation": "The Amazon Resource Name (ARN) of a resource in the green environment.
" + }, + "Status": { + "shape": "SwitchoverDetailStatus", + "documentation": "The switchover status of a resource in a blue/green deployment.
Values:
preparing-for-switchover
- The resource is being prepared to switch over.
ready-for-switchover
- The resource is ready to switch over.
switchover-in-progress
- The resource is being switched over.
switchover-completed
- The resource has been switched over.
switchover-failed
- The resource attempted to switch over but failed.
Contains the details about a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" + }, + "SwitchoverDetailList": { + "type": "list", + "member": { + "shape": "SwitchoverDetail" + } + }, + "SwitchoverDetailStatus": { + "type": "string" + }, "SwitchoverReadReplicaMessage": { "type": "structure", "required": [ @@ -13891,6 +14262,10 @@ } } }, + "SwitchoverTimeout": { + "type": "integer", + "min": 30 + }, "TStamp": { "type": "timestamp" }, @@ -13906,7 +14281,7 @@ "documentation": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with aws:
or rds:
. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").
Metadata assigned to an Amazon RDS resource consisting of a key-value pair.
" + "documentation": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.
For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.
" }, "TagList": { "type": "list", @@ -13926,6 +14301,24 @@ }, "documentation": "" }, + "TargetDBClusterParameterGroupName": { + "type": "string", + "max": 255, + "min": 1, + "pattern": "[A-Za-z](?!.*--)[0-9A-Za-z-]*[^-]|^default(?!.*--)(?!.*\\.\\.)[0-9A-Za-z-.]*[^-]" + }, + "TargetDBParameterGroupName": { + "type": "string", + "max": 255, + "min": 1, + "pattern": "[A-Za-z](?!.*--)[0-9A-Za-z-]*[^-]|^default(?!.*--)(?!.*\\.\\.)[0-9A-Za-z-.]*[^-]" + }, + "TargetEngineVersion": { + "type": "string", + "max": 64, + "min": 1, + "pattern": "[0-9A-Za-z-_.]+" + }, "TargetGroupList": { "type": "list", "member": { diff --git a/apis/rds-2014-10-31.paginators.json b/apis/rds-2014-10-31.paginators.json index 9086b659a5..b6db47f81c 100644 --- a/apis/rds-2014-10-31.paginators.json +++ b/apis/rds-2014-10-31.paginators.json @@ -1,5 +1,11 @@ { "pagination": { + "DescribeBlueGreenDeployments": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "BlueGreenDeployments" + }, "DescribeCertificates": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/apis/textract-2018-06-27.min.json b/apis/textract-2018-06-27.min.json index 4249d8bce7..c20b93fad7 100644 --- a/apis/textract-2018-06-27.min.json +++ b/apis/textract-2018-06-27.min.json @@ -122,29 +122,7 @@ "IdentityDocuments": { "type": "list", "member": { - "type": "structure", - "members": { - "DocumentIndex": { - "type": "integer" - }, - "IdentityDocumentFields": { - "type": "list", - "member": { - "type": "structure", - "members": { - "Type": { - "shape": "S20" - }, - "ValueDetection": { - "shape": "S20" - } - } - } - }, - "Blocks": { - "shape": "Sp" - } - } + "shape": "S1x" } }, "DocumentMetadata": { @@ -278,6 +256,195 @@ } } }, + "GetLendingAnalysis": { + "input": { + "type": "structure", + "required": [ + "JobId" + ], + "members": { + "JobId": {}, + "MaxResults": { + "type": "integer" + }, + "NextToken": {} + } + }, + "output": { + "type": "structure", + "members": { + "DocumentMetadata": { + "shape": "Sn" + }, + "JobStatus": {}, + "NextToken": {}, + "Results": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Page": { + "type": "integer" + }, + "PageClassification": { + "type": "structure", + "required": [ + "PageType", + "PageNumber" + ], + "members": { + "PageType": { + "shape": "S2p" + }, + "PageNumber": { + "shape": "S2p" + } + } + }, + "Extractions": { + "type": "list", + "member": { + "type": "structure", + "members": { + "LendingDocument": { + "type": "structure", + "members": { + "LendingFields": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Type": {}, + "KeyDetection": { + "shape": "S2w" + }, + "ValueDetections": { + "type": "list", + "member": { + "shape": "S2w" + } + } + } + } + }, + "SignatureDetections": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Confidence": { + "type": "float" + }, + "Geometry": { + "shape": "Sv" + } + } + } + } + } + }, + "ExpenseDocument": { + "shape": "S1g" + }, + "IdentityDocument": { + "shape": "S1x" + } + } + } + } + } + } + }, + "Warnings": { + "shape": "S2b" + }, + "StatusMessage": {}, + "AnalyzeLendingModelVersion": {} + } + } + }, + "GetLendingAnalysisSummary": { + "input": { + "type": "structure", + "required": [ + "JobId" + ], + "members": { + "JobId": {} + } + }, + "output": { + "type": "structure", + "members": { + "DocumentMetadata": { + "shape": "Sn" + }, + "JobStatus": {}, + "Summary": { + "type": "structure", + "members": { + "DocumentGroups": { + "type": 
"list", + "member": { + "type": "structure", + "members": { + "Type": {}, + "SplitDocuments": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Index": { + "type": "integer" + }, + "Pages": { + "type": "list", + "member": { + "type": "integer" + } + } + } + } + }, + "DetectedSignatures": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Page": { + "type": "integer" + } + } + } + }, + "UndetectedSignatures": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Page": { + "type": "integer" + } + } + } + } + } + } + }, + "UndetectedDocumentTypes": { + "type": "list", + "member": {} + } + } + }, + "Warnings": { + "shape": "S2b" + }, + "StatusMessage": {}, + "AnalyzeLendingModelVersion": {} + } + } + }, "StartDocumentAnalysis": { "input": { "type": "structure", @@ -287,7 +454,7 @@ ], "members": { "DocumentLocation": { - "shape": "S2l" + "shape": "S3e" }, "FeatureTypes": { "shape": "S8" @@ -295,10 +462,10 @@ "ClientRequestToken": {}, "JobTag": {}, "NotificationChannel": { - "shape": "S2o" + "shape": "S3h" }, "OutputConfig": { - "shape": "S2r" + "shape": "S3k" }, "KMSKeyId": {}, "QueriesConfig": { @@ -321,15 +488,15 @@ ], "members": { "DocumentLocation": { - "shape": "S2l" + "shape": "S3e" }, "ClientRequestToken": {}, "JobTag": {}, "NotificationChannel": { - "shape": "S2o" + "shape": "S3h" }, "OutputConfig": { - "shape": "S2r" + "shape": "S3k" }, "KMSKeyId": {} } @@ -349,15 +516,43 @@ ], "members": { "DocumentLocation": { - "shape": "S2l" + "shape": "S3e" }, "ClientRequestToken": {}, "JobTag": {}, "NotificationChannel": { - "shape": "S2o" + "shape": "S3h" }, "OutputConfig": { - "shape": "S2r" + "shape": "S3k" + }, + "KMSKeyId": {} + } + }, + "output": { + "type": "structure", + "members": { + "JobId": {} + } + } + }, + "StartLendingAnalysis": { + "input": { + "type": "structure", + "required": [ + "DocumentLocation" + ], + "members": { + "DocumentLocation": { + "shape": "S3e" + }, + "ClientRequestToken": {}, + "JobTag": {}, + "NotificationChannel": { + "shape": "S3h" + }, + "OutputConfig": { + "shape": "S3k" }, "KMSKeyId": {} } @@ -523,39 +718,42 @@ "S1f": { "type": "list", "member": { - "type": "structure", - "members": { - "ExpenseIndex": { - "type": "integer" - }, - "SummaryFields": { - "shape": "S1h" - }, - "LineItemGroups": { - "type": "list", - "member": { - "type": "structure", - "members": { - "LineItemGroupIndex": { - "type": "integer" - }, - "LineItems": { - "type": "list", - "member": { - "type": "structure", - "members": { - "LineItemExpenseFields": { - "shape": "S1h" - } + "shape": "S1g" + } + }, + "S1g": { + "type": "structure", + "members": { + "ExpenseIndex": { + "type": "integer" + }, + "SummaryFields": { + "shape": "S1h" + }, + "LineItemGroups": { + "type": "list", + "member": { + "type": "structure", + "members": { + "LineItemGroupIndex": { + "type": "integer" + }, + "LineItems": { + "type": "list", + "member": { + "type": "structure", + "members": { + "LineItemExpenseFields": { + "shape": "S1h" } } } } } - }, - "Blocks": { - "shape": "Sp" } + }, + "Blocks": { + "shape": "Sp" } } }, @@ -619,6 +817,31 @@ } } }, + "S1x": { + "type": "structure", + "members": { + "DocumentIndex": { + "type": "integer" + }, + "IdentityDocumentFields": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Type": { + "shape": "S20" + }, + "ValueDetection": { + "shape": "S20" + } + } + } + }, + "Blocks": { + "shape": "Sp" + } + } + }, "S20": { "type": "structure", "required": [ @@ -653,7 +876,32 @@ } } }, - 
"S2l": { + "S2p": { + "type": "list", + "member": { + "type": "structure", + "members": { + "Value": {}, + "Confidence": { + "type": "float" + } + } + } + }, + "S2w": { + "type": "structure", + "members": { + "Text": {}, + "SelectionStatus": {}, + "Geometry": { + "shape": "Sv" + }, + "Confidence": { + "type": "float" + } + } + }, + "S3e": { "type": "structure", "members": { "S3Object": { @@ -661,7 +909,7 @@ } } }, - "S2o": { + "S3h": { "type": "structure", "required": [ "SNSTopicArn", @@ -672,7 +920,7 @@ "RoleArn": {} } }, - "S2r": { + "S3k": { "type": "structure", "required": [ "S3Bucket" diff --git a/apis/textract-2018-06-27.normal.json b/apis/textract-2018-06-27.normal.json index 18eb803968..46bf945cdd 100644 --- a/apis/textract-2018-06-27.normal.json +++ b/apis/textract-2018-06-27.normal.json @@ -307,6 +307,86 @@ ], "documentation": "Gets the results for an Amazon Textract asynchronous operation that analyzes invoices and receipts. Amazon Textract finds contact information, items purchased, and vendor name, from input invoices and receipts.
You start asynchronous invoice/receipt analysis by calling StartExpenseAnalysis, which returns a job identifier (JobId
). Upon completion of the invoice/receipt analysis, Amazon Textract publishes the completion status to the Amazon Simple Notification Service (Amazon SNS) topic. This topic must be registered in the initial call to StartExpenseAnalysis
. To get the results of the invoice/receipt analysis operation, first ensure that the status value published to the Amazon SNS topic is SUCCEEDED
. If so, call GetExpenseAnalysis
, and pass the job identifier (JobId
) from the initial call to StartExpenseAnalysis
.
Use the MaxResults parameter to limit the number of blocks that are returned. If there are more results than specified in MaxResults
, the value of NextToken
in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetExpenseAnalysis
, and populate the NextToken
request parameter with the token value that's returned from the previous call to GetExpenseAnalysis
.
For more information, see Analyzing Invoices and Receipts.
" }, + "GetLendingAnalysis": { + "name": "GetLendingAnalysis", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "GetLendingAnalysisRequest" + }, + "output": { + "shape": "GetLendingAnalysisResponse" + }, + "errors": [ + { + "shape": "InvalidParameterException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ProvisionedThroughputExceededException" + }, + { + "shape": "InvalidJobIdException" + }, + { + "shape": "InternalServerError" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "InvalidS3ObjectException" + }, + { + "shape": "InvalidKMSKeyException" + } + ], + "documentation": "Gets the results for an Amazon Textract asynchronous operation that analyzes text in a lending document.
You start asynchronous text analysis by calling StartLendingAnalysis
, which returns a job identifier (JobId
). When the text analysis operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartLendingAnalysis
.
To get the results of the text analysis operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLendingAnalysis, and pass the job identifier (JobId
) from the initial call to StartLendingAnalysis
.
Gets summarized results for the StartLendingAnalysis
operation, which analyzes text in a lending document. The returned summary consists of information about documents grouped together by a common document type. Information like detected signatures, page numbers, and split documents is returned with respect to the type of grouped document.
You start asynchronous text analysis by calling StartLendingAnalysis
, which returns a job identifier (JobId
). When the text analysis operation finishes, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that's registered in the initial call to StartLendingAnalysis
.
To get the results of the text analysis operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetLendingAnalysisSummary
, and pass the job identifier (JobId
) from the initial call to StartLendingAnalysis
.
Starts the asynchronous analysis of invoices or receipts for data like contact information, items purchased, and vendor names.
StartExpenseAnalysis
can analyze text in documents that are in JPEG, PNG, and PDF format. The documents must be stored in an Amazon S3 bucket. Use the DocumentLocation parameter to specify the name of your S3 bucket and the name of the document in that bucket.
StartExpenseAnalysis
returns a job identifier (JobId
) that you will provide to GetExpenseAnalysis
to retrieve the results of the operation. When the analysis of the input invoices/receipts is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you provide to the NotificationChannel
. To obtain the results of the invoice and receipt analysis operation, ensure that the status value published to the Amazon SNS topic is SUCCEEDED
. If so, call GetExpenseAnalysis, and pass the job identifier (JobId
) that was returned by your call to StartExpenseAnalysis
.
For more information, see Analyzing Invoices and Receipts.
" + }, + "StartLendingAnalysis": { + "name": "StartLendingAnalysis", + "http": { + "method": "POST", + "requestUri": "/" + }, + "input": { + "shape": "StartLendingAnalysisRequest" + }, + "output": { + "shape": "StartLendingAnalysisResponse" + }, + "errors": [ + { + "shape": "InvalidParameterException" + }, + { + "shape": "InvalidS3ObjectException" + }, + { + "shape": "InvalidKMSKeyException" + }, + { + "shape": "UnsupportedDocumentException" + }, + { + "shape": "DocumentTooLargeException" + }, + { + "shape": "BadDocumentException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ProvisionedThroughputExceededException" + }, + { + "shape": "InternalServerError" + }, + { + "shape": "IdempotentParameterMismatchException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "LimitExceededException" + } + ], + "documentation": "Starts the classification and analysis of an input document. StartLendingAnalysis
initiates the classification and analysis of a packet of lending documents. StartLendingAnalysis
operates on a document file located in an Amazon S3 bucket.
StartLendingAnalysis
can analyze text in documents that are in one of the following formats: JPEG, PNG, TIFF, PDF. Use DocumentLocation
to specify the bucket name and the file name of the document.
StartLendingAnalysis
returns a job identifier (JobId
) that you use to get the results of the operation. When the text analysis is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel
. To get the results of the text analysis operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If the status is SUCCEEDED you can call either GetLendingAnalysis
or GetLendingAnalysisSummary
and provide the JobId
to obtain the results of the analysis.
If using OutputConfig
to specify an Amazon S3 bucket, the output will be contained within the specified prefix in a directory labeled with the job-id. In the directory there are 3 sub-directories:
detailedResponse (contains the GetLendingAnalysis response)
summaryResponse (for the GetLendingAnalysisSummary response)
splitDocuments (documents split across logical boundaries)
The page a detected signature was found on.
" + } + }, + "documentation": "A structure that holds information regarding a detected signature on a page.
" + }, + "DetectedSignatureList": { + "type": "list", + "member": { + "shape": "DetectedSignature" + } + }, "Document": { "type": "structure", "members": { @@ -757,6 +905,34 @@ }, "documentation": "The input document, either as bytes or as an S3 object.
You pass image bytes to an Amazon Textract API operation by using the Bytes
property. For example, you would use the Bytes
property to pass a document loaded from a local file system. Image bytes passed by using the Bytes
property must be base64 encoded. Your code might not need to encode document file bytes if you're using an AWS SDK to call Amazon Textract API operations.
You pass images stored in an S3 bucket to an Amazon Textract API operation by using the S3Object
property. Documents stored in an S3 bucket don't need to be base64 encoded.
The AWS Region for the S3 bucket that contains the S3 object must match the AWS Region that you use for Amazon Textract operations.
If you use the AWS CLI to call Amazon Textract operations, passing image bytes using the Bytes property isn't supported. You must first upload the document to an Amazon S3 bucket, and then call the operation using the S3Object property.
For Amazon Textract to process an S3 object, the user must have permission to access the S3 object.
" }, + "DocumentGroup": { + "type": "structure", + "members": { + "Type": { + "shape": "NonEmptyString", + "documentation": "The type of document that Amazon Textract has detected. See LINK for a list of all types returned by Textract.
" + }, + "SplitDocuments": { + "shape": "SplitDocumentList", + "documentation": "An array that contains information about the pages of a document, defined by logical boundary.
" + }, + "DetectedSignatures": { + "shape": "DetectedSignatureList", + "documentation": "A list of the detected signatures found in a document group.
" + }, + "UndetectedSignatures": { + "shape": "UndetectedSignatureList", + "documentation": "A list of any expected signatures not found in a document group.
" + } + }, + "documentation": "Summary information about documents grouped by the same document type.
" + }, + "DocumentGroupList": { + "type": "list", + "member": { + "shape": "DocumentGroup" + } + }, "DocumentLocation": { "type": "structure", "members": { @@ -765,7 +941,7 @@ "documentation": "The Amazon S3 bucket that contains the input document.
" } }, - "documentation": "The Amazon S3 bucket that contains the document to be processed. It's used by asynchronous operations such as StartDocumentTextDetection.
The input document can be an image file in JPEG or PNG format. It can also be a file in PDF format.
" + "documentation": "The Amazon S3 bucket that contains the document to be processed. It's used by asynchronous operations.
The input document can be an image file in JPEG or PNG format. It can also be a file in PDF format.
" }, "DocumentMetadata": { "type": "structure", @@ -931,6 +1107,28 @@ }, "documentation": "An object used to store information about the Type detected by Amazon Textract.
" }, + "Extraction": { + "type": "structure", + "members": { + "LendingDocument": { + "shape": "LendingDocument", + "documentation": "Holds the structured data returned by AnalyzeDocument for lending documents.
" + }, + "ExpenseDocument": { + "shape": "ExpenseDocument" + }, + "IdentityDocument": { + "shape": "IdentityDocument" + } + }, + "documentation": "Contains information extracted by an analysis operation after using StartLendingAnalysis.
" + }, + "ExtractionList": { + "type": "list", + "member": { + "shape": "Extraction" + } + }, "FeatureType": { "type": "string", "enum": [ @@ -1126,6 +1324,98 @@ } } }, + "GetLendingAnalysisRequest": { + "type": "structure", + "required": [ + "JobId" + ], + "members": { + "JobId": { + "shape": "JobId", + "documentation": "A unique identifier for the lending or text-detection job. The JobId
is returned from StartLendingAnalysis
. A JobId
value is only valid for 7 days.
The maximum number of results to return per paginated call. The largest value that you can specify is 30. If you specify a value greater than 30, a maximum of 30 results is returned. The default value is 30.
" + }, + "NextToken": { + "shape": "PaginationToken", + "documentation": "If the previous response was incomplete, Amazon Textract returns a pagination token in the response. You can use this pagination token to retrieve the next set of lending results.
" + } + } + }, + "GetLendingAnalysisResponse": { + "type": "structure", + "members": { + "DocumentMetadata": { + "shape": "DocumentMetadata" + }, + "JobStatus": { + "shape": "JobStatus", + "documentation": "The current status of the lending analysis job.
" + }, + "NextToken": { + "shape": "PaginationToken", + "documentation": "If the response is truncated, Amazon Textract returns this token. You can use this token in the subsequent request to retrieve the next set of lending results.
" + }, + "Results": { + "shape": "LendingResultList", + "documentation": "Holds the information returned by one of AmazonTextract's document analysis operations for the pinstripe.
" + }, + "Warnings": { + "shape": "Warnings", + "documentation": "A list of warnings that occurred during the lending analysis operation.
" + }, + "StatusMessage": { + "shape": "StatusMessage", + "documentation": "Returns if the lending analysis job could not be completed. Contains explanation for what error occurred.
" + }, + "AnalyzeLendingModelVersion": { + "shape": "String", + "documentation": "The current model version of the Analyze Lending API.
" + } + } + }, + "GetLendingAnalysisSummaryRequest": { + "type": "structure", + "required": [ + "JobId" + ], + "members": { + "JobId": { + "shape": "JobId", + "documentation": " A unique identifier for the lending or text-detection job. The JobId
is returned from StartLendingAnalysis. A JobId
value is only valid for 7 days.
The current status of the lending analysis job.
" + }, + "Summary": { + "shape": "LendingSummary", + "documentation": "Contains summary information for documents grouped by type.
" + }, + "Warnings": { + "shape": "Warnings", + "documentation": "A list of warnings that occurred during the lending analysis operation.
" + }, + "StatusMessage": { + "shape": "StatusMessage", + "documentation": "Returns if the lending analysis could not be completed. Contains explanation for what error occurred.
" + }, + "AnalyzeLendingModelVersion": { + "shape": "String", + "documentation": "The current model version of the Analyze Lending API.
" + } + } + }, "HumanLoopActivationConditionsEvaluationResults": { "type": "string", "max": 10240 @@ -1281,6 +1571,108 @@ "min": 1, "pattern": "^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$" }, + "LendingDetection": { + "type": "structure", + "members": { + "Text": { + "shape": "String", + "documentation": "The text extracted for a detected value in a lending document.
" + }, + "SelectionStatus": { + "shape": "SelectionStatus", + "documentation": "The selection status of a selection element, such as an option button or check box.
" + }, + "Geometry": { + "shape": "Geometry" + }, + "Confidence": { + "shape": "Percent", + "documentation": "The confidence level for the text of a detected value in a lending document.
" + } + }, + "documentation": "The results extracted for a lending document.
" + }, + "LendingDetectionList": { + "type": "list", + "member": { + "shape": "LendingDetection" + } + }, + "LendingDocument": { + "type": "structure", + "members": { + "LendingFields": { + "shape": "LendingFieldList", + "documentation": "An array of LendingField objects.
" + }, + "SignatureDetections": { + "shape": "SignatureDetectionList", + "documentation": "A list of signatures detected in a lending document.
" + } + }, + "documentation": "Holds the structured data returned by AnalyzeDocument for lending documents.
" + }, + "LendingField": { + "type": "structure", + "members": { + "Type": { + "shape": "String", + "documentation": "The type of the lending document.
" + }, + "KeyDetection": { + "shape": "LendingDetection" + }, + "ValueDetections": { + "shape": "LendingDetectionList", + "documentation": "An array of LendingDetection objects.
" + } + }, + "documentation": "Holds the normalized key-value pairs returned by AnalyzeDocument, including the document type, detected text, and geometry.
" + }, + "LendingFieldList": { + "type": "list", + "member": { + "shape": "LendingField" + } + }, + "LendingResult": { + "type": "structure", + "members": { + "Page": { + "shape": "UInteger", + "documentation": "The page number for a page, with regard to whole submission.
" + }, + "PageClassification": { + "shape": "PageClassification", + "documentation": "The classifier result for a given page.
" + }, + "Extractions": { + "shape": "ExtractionList", + "documentation": "An array of Extraction to hold structured data. e.g. normalized key value pairs instead of raw OCR detections .
" + } + }, + "documentation": "Contains the detections for each page analyzed through the Analyze Lending API.
" + }, + "LendingResultList": { + "type": "list", + "member": { + "shape": "LendingResult" + } + }, + "LendingSummary": { + "type": "structure", + "members": { + "DocumentGroups": { + "shape": "DocumentGroupList", + "documentation": "Contains an array of all DocumentGroup objects.
" + }, + "UndetectedDocumentTypes": { + "shape": "UndetectedDocumentTypeList", + "documentation": "UndetectedDocumentTypes.
" + } + }, + "documentation": "Contains information regarding DocumentGroups and UndetectedDocumentTypes.
" + }, "LineItemFields": { "type": "structure", "members": { @@ -1355,7 +1747,7 @@ "documentation": "The Amazon Resource Name (ARN) of an IAM role that gives Amazon Textract publishing permissions to the Amazon SNS topic.
" } }, - "documentation": "The Amazon Simple Notification Service (Amazon SNS) topic to which Amazon Textract publishes the completion status of an asynchronous document operation, such as StartDocumentTextDetection.
" + "documentation": "The Amazon Simple Notification Service (Amazon SNS) topic to which Amazon Textract publishes the completion status of an asynchronous document operation.
" }, "OutputConfig": { "type": "structure", @@ -1372,7 +1764,31 @@ "documentation": "The prefix of the object key that the output will be saved to. When not enabled, the prefix will be “textract_output\".
" } }, - "documentation": "Sets whether or not your output will go to a user created bucket. Used to set the name of the bucket, and the prefix on the output file.
OutputConfig
is an optional parameter which lets you adjust where your output will be placed. By default, Amazon Textract will store the results internally and can only be accessed by the Get API operations. With OutputConfig enabled, you can set the name of the bucket the output will be sent to and the file prefix of the results where you can download your results. Additionally, you can set the KMSKeyID
parameter to a customer master key (CMK) to encrypt your output. Without this parameter set Amazon Textract will encrypt server-side using the AWS managed CMK for Amazon S3.
Decryption of Customer Content is necessary for processing of the documents by Amazon Textract. If your account is opted out under an AI services opt out policy then all unencrypted Customer Content is immediately and permanently deleted after the Customer Content has been processed by the service. No copy of of the output is retained by Amazon Textract. For information about how to opt out, see Managing AI services opt-out policy.
For more information on data privacy, see the Data Privacy FAQ.
" + "documentation": "Sets whether or not your output will go to a user created bucket. Used to set the name of the bucket, and the prefix on the output file.
OutputConfig
is an optional parameter which lets you adjust where your output will be placed. By default, Amazon Textract will store the results internally and can only be accessed by the Get API operations. With OutputConfig
enabled, you can set the name of the bucket the output will be sent to and the file prefix of the results where you can download your results. Additionally, you can set the KMSKeyID
parameter to a customer master key (CMK) to encrypt your output. Without this parameter set Amazon Textract will encrypt server-side using the AWS managed CMK for Amazon S3.
Decryption of Customer Content is necessary for processing of the documents by Amazon Textract. If your account is opted out under an AI services opt out policy, then all unencrypted Customer Content is immediately and permanently deleted after the Customer Content has been processed by the service. No copy of the output is retained by Amazon Textract. For information about how to opt out, see Managing AI services opt-out policy.
For more information on data privacy, see the Data Privacy FAQ.
" + }, + "PageClassification": { + "type": "structure", + "required": [ + "PageType", + "PageNumber" + ], + "members": { + "PageType": { + "shape": "PredictionList", + "documentation": "The class, or document type, assigned to a detected Page object. The class, or document type, assigned to a detected Page object.
" + }, + "PageNumber": { + "shape": "PredictionList", + "documentation": "The page number the value was detected on, relative to Amazon Textract's starting position.
" + } + }, + "documentation": "The class assigned to a Page object detected in an input document. Contains information regarding the predicted type/class of a document's page and the page number that the Page object was detected on.
" + }, + "PageList": { + "type": "list", + "member": { + "shape": "UInteger" + } }, "Pages": { "type": "list", @@ -1411,6 +1827,26 @@ "shape": "Point" } }, + "Prediction": { + "type": "structure", + "members": { + "Value": { + "shape": "NonEmptyString", + "documentation": "The predicted value of a detected object.
" + }, + "Confidence": { + "shape": "Percent", + "documentation": "Amazon Textract's confidence in its predicted value.
" + } + }, + "documentation": "Contains information regarding predicted values returned by Amazon Textract operations, including the predicted value and the confidence in the predicted value.
" + }, + "PredictionList": { + "type": "list", + "member": { + "shape": "Prediction" + } + }, "Queries": { "type": "list", "member": { @@ -1557,6 +1993,45 @@ "NOT_SELECTED" ] }, + "SignatureDetection": { + "type": "structure", + "members": { + "Confidence": { + "shape": "Percent", + "documentation": "The confidence, from 0 to 100, in the predicted values for a detected signature.
" + }, + "Geometry": { + "shape": "Geometry" + } + }, + "documentation": "Information regarding a detected signature on a page.
" + }, + "SignatureDetectionList": { + "type": "list", + "member": { + "shape": "SignatureDetection" + } + }, + "SplitDocument": { + "type": "structure", + "members": { + "Index": { + "shape": "UInteger", + "documentation": "The index for a given document in a DocumentGroup of a specific Type.
" + }, + "Pages": { + "shape": "PageList", + "documentation": "An array of page numbers for a for a given document, ordered by logical boundary.
" + } + }, + "documentation": "Contains information about the pages of a document, defined by logical boundary.
" + }, + "SplitDocumentList": { + "type": "list", + "member": { + "shape": "SplitDocument" + } + }, "StartDocumentAnalysisRequest": { "type": "structure", "required": [ @@ -1688,6 +2163,44 @@ } } }, + "StartLendingAnalysisRequest": { + "type": "structure", + "required": [ + "DocumentLocation" + ], + "members": { + "DocumentLocation": { + "shape": "DocumentLocation" + }, + "ClientRequestToken": { + "shape": "ClientRequestToken", + "documentation": "The idempotent token that you use to identify the start request. If you use the same token with multiple StartLendingAnalysis
requests, the same JobId
is returned. Use ClientRequestToken
to prevent the same job from being accidentally started more than once. For more information, see Calling Amazon Textract Asynchronous Operations.
An identifier that you specify to be included in the completion notification published to the Amazon SNS topic. For example, you can use JobTag
to identify the type of document that the completion notification corresponds to (such as a tax form or a receipt).
The KMS key used to encrypt the inference results. This can be in either Key ID or Key Alias format. When a KMS key is provided, the KMS key will be used for server-side encryption of the objects in the customer bucket. When this parameter is not enabled, the result will be encrypted server side, using SSE-S3.
" + } + } + }, + "StartLendingAnalysisResponse": { + "type": "structure", + "members": { + "JobId": { + "shape": "JobId", + "documentation": "A unique identifier for the lending or text-detection job. The JobId
is returned from StartLendingAnalysis
. A JobId
value is only valid for 7 days.
The page where a signature was expected but not found.
" + } + }, + "documentation": "A structure containing information about an undetected signature on a page where it was expected but not found.
" + }, + "UndetectedSignatureList": { + "type": "list", + "member": { + "shape": "UndetectedSignature" + } + }, "ValueType": { "type": "string", "enum": [ diff --git a/apis/transcribe-2017-10-26.min.json b/apis/transcribe-2017-10-26.min.json index 6a209c4c48..5bf0e63016 100644 --- a/apis/transcribe-2017-10-26.min.json +++ b/apis/transcribe-2017-10-26.min.json @@ -24,14 +24,15 @@ "CategoryName": {}, "Rules": { "shape": "S3" - } + }, + "InputType": {} } }, "output": { "type": "structure", "members": { "CategoryProperties": { - "shape": "Sl" + "shape": "Sm" } } } @@ -50,10 +51,10 @@ "BaseModelName": {}, "ModelName": {}, "InputDataConfig": { - "shape": "Sr" + "shape": "Ss" }, "Tags": { - "shape": "Su" + "shape": "Sv" } } }, @@ -64,7 +65,7 @@ "BaseModelName": {}, "ModelName": {}, "InputDataConfig": { - "shape": "Sr" + "shape": "Ss" }, "ModelStatus": {} } @@ -83,7 +84,7 @@ "LanguageCode": {}, "VocabularyFileUri": {}, "Tags": { - "shape": "Su" + "shape": "Sv" } } }, @@ -111,11 +112,11 @@ "VocabularyName": {}, "LanguageCode": {}, "Phrases": { - "shape": "S17" + "shape": "S18" }, "VocabularyFileUri": {}, "Tags": { - "shape": "Su" + "shape": "Sv" } } }, @@ -143,11 +144,11 @@ "VocabularyFilterName": {}, "LanguageCode": {}, "Words": { - "shape": "S1c" + "shape": "S1d" }, "VocabularyFilterFileUri": {}, "Tags": { - "shape": "Su" + "shape": "Sv" } } }, @@ -272,7 +273,7 @@ "type": "structure", "members": { "LanguageModel": { - "shape": "S1t" + "shape": "S1u" } } } @@ -291,7 +292,7 @@ "type": "structure", "members": { "CategoryProperties": { - "shape": "Sl" + "shape": "Sm" } } } @@ -310,7 +311,7 @@ "type": "structure", "members": { "CallAnalyticsJob": { - "shape": "S1y" + "shape": "S1z" } } } @@ -329,7 +330,7 @@ "type": "structure", "members": { "MedicalTranscriptionJob": { - "shape": "S2k" + "shape": "S2l" } } } @@ -372,7 +373,7 @@ "type": "structure", "members": { "TranscriptionJob": { - "shape": "S2y" + "shape": "S2z" } } } @@ -440,7 +441,7 @@ "Categories": { "type": "list", "member": { - "shape": "Sl" + "shape": "Sm" } } } @@ -506,7 +507,7 @@ "Models": { "type": "list", "member": { - "shape": "S1t" + "shape": "S1u" } } } @@ -575,7 +576,7 @@ "Status": {}, "NextToken": {}, "Vocabularies": { - "shape": "S3x" + "shape": "S3y" } } } @@ -595,7 +596,7 @@ "members": { "ResourceArn": {}, "Tags": { - "shape": "Su" + "shape": "Sv" } } } @@ -637,10 +638,10 @@ "FailureReason": {}, "OutputLocationType": {}, "ContentRedaction": { - "shape": "S27" + "shape": "S28" }, "ModelSettings": { - "shape": "S30" + "shape": "S31" }, "IdentifyLanguage": { "type": "boolean" @@ -652,7 +653,7 @@ "type": "float" }, "LanguageCodes": { - "shape": "S32" + "shape": "S33" } } } @@ -678,7 +679,7 @@ "Status": {}, "NextToken": {}, "Vocabularies": { - "shape": "S3x" + "shape": "S3y" } } } @@ -724,16 +725,16 @@ "members": { "CallAnalyticsJobName": {}, "Media": { - "shape": "S22" + "shape": "S23" }, "OutputLocation": {}, "OutputEncryptionKMSKeyId": {}, "DataAccessRoleArn": {}, "Settings": { - "shape": "S25" + "shape": "S26" }, "ChannelDefinitions": { - "shape": "S2f" + "shape": "S2g" } } }, @@ -741,7 +742,7 @@ "type": "structure", "members": { "CallAnalyticsJob": { - "shape": "S1y" + "shape": "S1z" } } } @@ -765,22 +766,22 @@ }, "MediaFormat": {}, "Media": { - "shape": "S22" + "shape": "S23" }, "OutputBucketName": {}, "OutputKey": {}, "OutputEncryptionKMSKeyId": {}, "KMSEncryptionContext": { - "shape": "S4i" + "shape": "S4j" }, "Settings": { - "shape": "S2o" + "shape": "S2p" }, "ContentIdentificationType": {}, "Specialty": {}, "Type": {}, 
"Tags": { - "shape": "Su" + "shape": "Sv" } } }, @@ -788,7 +789,7 @@ "type": "structure", "members": { "MedicalTranscriptionJob": { - "shape": "S2k" + "shape": "S2l" } } } @@ -808,25 +809,25 @@ }, "MediaFormat": {}, "Media": { - "shape": "S22" + "shape": "S23" }, "OutputBucketName": {}, "OutputKey": {}, "OutputEncryptionKMSKeyId": {}, "KMSEncryptionContext": { - "shape": "S4i" + "shape": "S4j" }, "Settings": { - "shape": "S2z" + "shape": "S30" }, "ModelSettings": { - "shape": "S30" + "shape": "S31" }, "JobExecutionSettings": { - "shape": "S31" + "shape": "S32" }, "ContentRedaction": { - "shape": "S27" + "shape": "S28" }, "IdentifyLanguage": { "type": "boolean" @@ -835,13 +836,13 @@ "type": "boolean" }, "LanguageOptions": { - "shape": "S2c" + "shape": "S2d" }, "Subtitles": { "type": "structure", "members": { "Formats": { - "shape": "S36" + "shape": "S37" }, "OutputStartIndex": { "type": "integer" @@ -849,10 +850,10 @@ } }, "Tags": { - "shape": "Su" + "shape": "Sv" }, "LanguageIdSettings": { - "shape": "S2d" + "shape": "S2e" } } }, @@ -860,7 +861,7 @@ "type": "structure", "members": { "TranscriptionJob": { - "shape": "S2y" + "shape": "S2z" } } } @@ -875,7 +876,7 @@ "members": { "ResourceArn": {}, "Tags": { - "shape": "Su" + "shape": "Sv" } } }, @@ -915,14 +916,15 @@ "CategoryName": {}, "Rules": { "shape": "S3" - } + }, + "InputType": {} } }, "output": { "type": "structure", "members": { "CategoryProperties": { - "shape": "Sl" + "shape": "Sm" } } } @@ -964,7 +966,7 @@ "VocabularyName": {}, "LanguageCode": {}, "Phrases": { - "shape": "S17" + "shape": "S18" }, "VocabularyFileUri": {} } @@ -990,7 +992,7 @@ "members": { "VocabularyFilterName": {}, "Words": { - "shape": "S1c" + "shape": "S1d" }, "VocabularyFilterFileUri": {} } @@ -1132,7 +1134,7 @@ } } }, - "Sl": { + "Sm": { "type": "structure", "members": { "CategoryName": {}, @@ -1144,10 +1146,11 @@ }, "LastUpdateTime": { "type": "timestamp" - } + }, + "InputType": {} } }, - "Sr": { + "Ss": { "type": "structure", "required": [ "S3Uri", @@ -1159,7 +1162,7 @@ "DataAccessRoleArn": {} } }, - "Su": { + "Sv": { "type": "list", "member": { "type": "structure", @@ -1173,15 +1176,15 @@ } } }, - "S17": { + "S18": { "type": "list", "member": {} }, - "S1c": { + "S1d": { "type": "list", "member": {} }, - "S1t": { + "S1u": { "type": "structure", "members": { "ModelName": {}, @@ -1199,11 +1202,11 @@ }, "FailureReason": {}, "InputDataConfig": { - "shape": "Sr" + "shape": "Ss" } } }, - "S1y": { + "S1z": { "type": "structure", "members": { "CallAnalyticsJobName": {}, @@ -1214,10 +1217,10 @@ }, "MediaFormat": {}, "Media": { - "shape": "S22" + "shape": "S23" }, "Transcript": { - "shape": "S23" + "shape": "S24" }, "StartTime": { "type": "timestamp" @@ -1234,28 +1237,28 @@ "type": "float" }, "Settings": { - "shape": "S25" + "shape": "S26" }, "ChannelDefinitions": { - "shape": "S2f" + "shape": "S2g" } } }, - "S22": { + "S23": { "type": "structure", "members": { "MediaFileUri": {}, "RedactedMediaFileUri": {} } }, - "S23": { + "S24": { "type": "structure", "members": { "TranscriptFileUri": {}, "RedactedTranscriptFileUri": {} } }, - "S25": { + "S26": { "type": "structure", "members": { "VocabularyName": {}, @@ -1263,17 +1266,17 @@ "VocabularyFilterMethod": {}, "LanguageModelName": {}, "ContentRedaction": { - "shape": "S27" + "shape": "S28" }, "LanguageOptions": { - "shape": "S2c" + "shape": "S2d" }, "LanguageIdSettings": { - "shape": "S2d" + "shape": "S2e" } } }, - "S27": { + "S28": { "type": "structure", "required": [ "RedactionType", @@ -1288,11 +1291,11 @@ } } }, - 
"S2c": { + "S2d": { "type": "list", "member": {} }, - "S2d": { + "S2e": { "type": "map", "key": {}, "value": { @@ -1304,7 +1307,7 @@ } } }, - "S2f": { + "S2g": { "type": "list", "member": { "type": "structure", @@ -1316,7 +1319,7 @@ } } }, - "S2k": { + "S2l": { "type": "structure", "members": { "MedicalTranscriptionJobName": {}, @@ -1327,7 +1330,7 @@ }, "MediaFormat": {}, "Media": { - "shape": "S22" + "shape": "S23" }, "Transcript": { "type": "structure", @@ -1346,17 +1349,17 @@ }, "FailureReason": {}, "Settings": { - "shape": "S2o" + "shape": "S2p" }, "ContentIdentificationType": {}, "Specialty": {}, "Type": {}, "Tags": { - "shape": "Su" + "shape": "Sv" } } }, - "S2o": { + "S2p": { "type": "structure", "members": { "ShowSpeakerLabels": { @@ -1377,7 +1380,7 @@ "VocabularyName": {} } }, - "S2y": { + "S2z": { "type": "structure", "members": { "TranscriptionJobName": {}, @@ -1388,10 +1391,10 @@ }, "MediaFormat": {}, "Media": { - "shape": "S22" + "shape": "S23" }, "Transcript": { - "shape": "S23" + "shape": "S24" }, "StartTime": { "type": "timestamp" @@ -1404,16 +1407,16 @@ }, "FailureReason": {}, "Settings": { - "shape": "S2z" + "shape": "S30" }, "ModelSettings": { - "shape": "S30" + "shape": "S31" }, "JobExecutionSettings": { - "shape": "S31" + "shape": "S32" }, "ContentRedaction": { - "shape": "S27" + "shape": "S28" }, "IdentifyLanguage": { "type": "boolean" @@ -1422,22 +1425,22 @@ "type": "boolean" }, "LanguageOptions": { - "shape": "S2c" + "shape": "S2d" }, "IdentifiedLanguageScore": { "type": "float" }, "LanguageCodes": { - "shape": "S32" + "shape": "S33" }, "Tags": { - "shape": "Su" + "shape": "Sv" }, "Subtitles": { "type": "structure", "members": { "Formats": { - "shape": "S36" + "shape": "S37" }, "SubtitleFileUris": { "type": "list", @@ -1449,11 +1452,11 @@ } }, "LanguageIdSettings": { - "shape": "S2d" + "shape": "S2e" } } }, - "S2z": { + "S30": { "type": "structure", "members": { "VocabularyName": {}, @@ -1476,13 +1479,13 @@ "VocabularyFilterMethod": {} } }, - "S30": { + "S31": { "type": "structure", "members": { "LanguageModelName": {} } }, - "S31": { + "S32": { "type": "structure", "members": { "AllowDeferredExecution": { @@ -1491,7 +1494,7 @@ "DataAccessRoleArn": {} } }, - "S32": { + "S33": { "type": "list", "member": { "type": "structure", @@ -1503,11 +1506,11 @@ } } }, - "S36": { + "S37": { "type": "list", "member": {} }, - "S3x": { + "S3y": { "type": "list", "member": { "type": "structure", @@ -1521,7 +1524,7 @@ } } }, - "S4i": { + "S4j": { "type": "map", "key": {}, "value": {} diff --git a/apis/transcribe-2017-10-26.normal.json b/apis/transcribe-2017-10-26.normal.json index 02eb702e7c..0dcd79d495 100644 --- a/apis/transcribe-2017-10-26.normal.json +++ b/apis/transcribe-2017-10-26.normal.json @@ -39,7 +39,7 @@ "shape": "ConflictException" } ], - "documentation": "Creates a new Call Analytics category.
All categories are automatically applied to your Call Analytics jobs. Note that in order to apply your categories to your jobs, you must create them before submitting your job request, as categories cannot be applied retroactively.
Call Analytics categories are composed of rules. For each category, you must create between 1 and 20 rules. Rules can include these parameters: , , , and .
To update an existing category, see .
To learn more about:
Call Analytics categories, see Creating categories
Using rules, see Rule criteria and refer to the data type
Call Analytics, see Analyzing call center audio with Call Analytics
Creates a new Call Analytics category.
All categories are automatically applied to your Call Analytics transcriptions. Note that in order to apply categories to your transcriptions, you must create them before submitting your transcription request, as categories cannot be applied retroactively.
When creating a new category, you can use the InputType
parameter to label the category as a batch category (POST_CALL
) or a streaming category (REAL_TIME
). Batch categories can only be applied to batch transcriptions and streaming categories can only be applied to streaming transcriptions. If you do not include InputType
, your category is created as a batch category by default.
Call Analytics categories are composed of rules. For each category, you must create between 1 and 20 rules. Rules can include these parameters: , , , and .
To update an existing category, see .
To learn more about Call Analytics categories, see Creating categories for batch transcriptions and Creating categories for streaming transcriptions.
" }, "CreateLanguageModel": { "name": "CreateLanguageModel", @@ -67,7 +67,7 @@ "shape": "ConflictException" } ], - "documentation": "Creates a new custom language model.
When creating a new language model, you must specify:
If you want a Wideband (audio sample rates over 16,000 Hz) or Narrowband (audio sample rates under 16,000 Hz) base model
The location of your training and tuning files (this must be an Amazon S3 URI)
The language of your model
A unique name for your model
For more information, see Custom language models.
" + "documentation": "Creates a new custom language model.
When creating a new custom language model, you must specify:
If you want a Wideband (audio sample rates over 16,000 Hz) or Narrowband (audio sample rates under 16,000 Hz) base model
The location of your training and tuning files (this must be an Amazon S3 URI)
The language of your model
A unique name for your model
Creates a new custom medical vocabulary.
Prior to creating a new medical vocabulary, you must first upload a text file that contains your new entries, phrases, and terms into an Amazon S3 bucket. Note that this differs from , where you can include a list of terms within your request using the Phrases
flag; CreateMedicalVocabulary
does not support the Phrases
flag.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your vocabulary request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
For more information, see Creating a custom vocabulary.
" + "documentation": "Creates a new custom medical vocabulary.
Before creating a new custom medical vocabulary, you must first upload a text file that contains your new entries, phrases, and terms into an Amazon S3 bucket. Note that this differs from , where you can include a list of terms within your request using the Phrases
flag; CreateMedicalVocabulary
does not support the Phrases
flag.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your custom vocabulary request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
For more information, see Custom vocabularies.
" }, "CreateVocabulary": { "name": "CreateVocabulary", @@ -123,7 +123,7 @@ "shape": "ConflictException" } ], - "documentation": "Creates a new custom vocabulary.
When creating a new vocabulary, you can either upload a text file that contains your new entries, phrases, and terms into an Amazon S3 bucket and include the URI in your request, or you can include a list of terms directly in your request using the Phrases
flag.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your vocabulary request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
For more information, see Creating a custom vocabulary.
" + "documentation": "Creates a new custom vocabulary.
When creating a new custom vocabulary, you can either upload a text file that contains your new entries, phrases, and terms into an Amazon S3 bucket and include the URI in your request, or you can include a list of terms directly in your request using the Phrases
flag.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your custom vocabulary request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
For more information, see Custom vocabularies.
" }, "CreateVocabularyFilter": { "name": "CreateVocabularyFilter", @@ -151,7 +151,7 @@ "shape": "ConflictException" } ], - "documentation": "Creates a new custom vocabulary filter.
You can use vocabulary filters to mask, delete, or flag specific words from your transcript. Vocabulary filters are commonly used to mask profanity in transcripts.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
For more information, see Using vocabulary filtering with unwanted words.
" + "documentation": "Creates a new custom vocabulary filter.
You can use custom vocabulary filters to mask, delete, or flag specific words from your transcript. Custom vocabulary filters are commonly used to mask profanity in transcripts.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your custom vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
For more information, see Vocabulary filtering.
" }, "DeleteCallAnalyticsCategory": { "name": "DeleteCallAnalyticsCategory", @@ -226,7 +226,7 @@ "shape": "InternalFailureException" } ], - "documentation": "Deletes a custom language model. To use this operation, specify the name of the language model you want to delete using ModelName
. Language model names are case sensitive.
Deletes a custom language model. To use this operation, specify the name of the language model you want to delete using ModelName
. Custom language model names are case sensitive.
Deletes a custom medical vocabulary. To use this operation, specify the name of the vocabulary you want to delete using VocabularyName
. Vocabulary names are case sensitive.
Deletes a custom medical vocabulary. To use this operation, specify the name of the custom vocabulary you want to delete using VocabularyName
. Custom vocabulary names are case sensitive.
Deletes a custom vocabulary. To use this operation, specify the name of the vocabulary you want to delete using VocabularyName
. Vocabulary names are case sensitive.
Deletes a custom vocabulary. To use this operation, specify the name of the custom vocabulary you want to delete using VocabularyName
. Custom vocabulary names are case sensitive.
Deletes a vocabulary filter. To use this operation, specify the name of the vocabulary filter you want to delete using VocabularyFilterName
. Vocabulary filter names are case sensitive.
Deletes a custom vocabulary filter. To use this operation, specify the name of the custom vocabulary filter you want to delete using VocabularyFilterName
. Custom vocabulary filter names are case sensitive.
Provides information about the specified custom language model.
This operation also shows if the base language model you used to create your custom language model has been updated. If Amazon Transcribe has updated the base model, you can create a new custom language model using the updated base model.
If you tried to create a new custom language model and the request wasn't successful, you can use DescribeLanguageModel
to help identify the reason for this failure.
To get a list of your custom language models, use the operation.
" + "documentation": "Provides information about the specified custom language model.
This operation also shows if the base language model that you used to create your custom language model has been updated. If Amazon Transcribe has updated the base model, you can create a new custom language model using the updated base model.
If you tried to create a new custom language model and the request wasn't successful, you can use DescribeLanguageModel
to help identify the reason for this failure.
Provides information about the specified medical transcription job.
To view the status of the specified medical transcription job, check the TranscriptionJobStatus
field. If the status is COMPLETED
, the job is finished and you can find the results at the location specified in TranscriptFileUri
. If the status is FAILED
, FailureReason
provides details on why your transcription job failed.
To get a list of your medical transcription jobs, use the operation.
" + "documentation": "Provides information about the specified medical transcription job.
To view the status of the specified medical transcription job, check the TranscriptionJobStatus
field. If the status is COMPLETED
, the job is finished. You can find the results at the location specified in TranscriptFileUri
. If the status is FAILED
, FailureReason
provides details on why your transcription job failed.
To get a list of your medical transcription jobs, use the operation.
" }, "GetMedicalVocabulary": { "name": "GetMedicalVocabulary", @@ -485,7 +485,7 @@ "shape": "BadRequestException" } ], - "documentation": "Provides information about the specified custom medical vocabulary.
To view the status of the specified medical vocabulary, check the VocabularyState
field. If the status is READY
, your vocabulary is available to use. If the status is FAILED
, FailureReason
provides details on why your vocabulary failed.
To get a list of your custom medical vocabularies, use the operation.
" + "documentation": "Provides information about the specified custom medical vocabulary.
To view the status of the specified custom medical vocabulary, check the VocabularyState
field. If the status is READY
, your custom vocabulary is available to use. If the status is FAILED
, FailureReason
provides details on why your vocabulary failed.
To get a list of your custom medical vocabularies, use the operation.
" }, "GetTranscriptionJob": { "name": "GetTranscriptionJob", @@ -513,7 +513,7 @@ "shape": "NotFoundException" } ], - "documentation": "Provides information about the specified transcription job.
To view the status of the specified transcription job, check the TranscriptionJobStatus
field. If the status is COMPLETED
, the job is finished and you can find the results at the location specified in TranscriptFileUri
. If the status is FAILED
, FailureReason
provides details on why your transcription job failed.
If you enabled content redaction, the redacted transcript can be found at the location specified in RedactedTranscriptFileUri
.
To get a list of your transcription jobs, use the operation.
" + "documentation": "Provides information about the specified transcription job.
To view the status of the specified transcription job, check the TranscriptionJobStatus
field. If the status is COMPLETED
, the job is finished. You can find the results at the location specified in TranscriptFileUri
. If the status is FAILED
, FailureReason
provides details on why your transcription job failed.
If you enabled content redaction, the redacted transcript can be found at the location specified in RedactedTranscriptFileUri
.
To get a list of your transcription jobs, use the operation.
" }, "GetVocabulary": { "name": "GetVocabulary", @@ -541,7 +541,7 @@ "shape": "BadRequestException" } ], - "documentation": "Provides information about the specified custom vocabulary.
To view the status of the specified vocabulary, check the VocabularyState field. If the status is READY, your vocabulary is available to use. If the status is FAILED, FailureReason provides details on why your vocabulary failed. To get a list of your custom vocabularies, use the operation.
" + "documentation": "Provides information about the specified custom vocabulary. To view the status of the specified custom vocabulary, check the VocabularyState field. If the status is READY, your custom vocabulary is available to use. If the status is FAILED, FailureReason provides details on why your custom vocabulary failed. To get a list of your custom vocabularies, use the operation.
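A small sketch of the VocabularyState check described above (the vocabulary name and region are placeholders):

```javascript
// Sketch only: the vocabulary name and region are placeholders.
const AWS = require('aws-sdk');
const transcribe = new AWS.TranscribeService({ region: 'us-east-1' });

transcribe.getVocabulary({ VocabularyName: 'my-custom-vocabulary' }, (err, data) => {
  if (err) return console.error(err);
  if (data.VocabularyState === 'READY') {
    // DownloadUri can be used to view or download the vocabulary
    console.log('Ready:', data.DownloadUri);
  } else if (data.VocabularyState === 'FAILED') {
    console.log('Vocabulary failed:', data.FailureReason);
  } else {
    console.log('State:', data.VocabularyState);
  }
});
```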
" }, "GetVocabularyFilter": { "name": "GetVocabularyFilter", @@ -569,7 +569,7 @@ "shape": "BadRequestException" } ], - "documentation": "Provides information about the specified custom vocabulary filter.
To view the status of the specified vocabulary filter, check the VocabularyState field. If the status is READY, your vocabulary is available to use. If the status is FAILED, FailureReason provides details on why your vocabulary filter failed. To get a list of your custom vocabulary filters, use the operation.
" + "documentation": "Provides information about the specified custom vocabulary filter. To get a list of your custom vocabulary filters, use the operation.
" }, "ListCallAnalyticsCategories": { "name": "ListCallAnalyticsCategories", @@ -644,7 +644,7 @@ "shape": "InternalFailureException" } ], - "documentation": "Provides a list of custom language models that match the specified criteria. If no criteria are specified, all language models are returned.
To get detailed information about a specific custom language model, use the operation.
" + "documentation": "Provides a list of custom language models that match the specified criteria. If no criteria are specified, all custom language models are returned.
To get detailed information about a specific custom language model, use the operation.
" }, "ListMedicalTranscriptionJobs": { "name": "ListMedicalTranscriptionJobs", @@ -825,7 +825,7 @@ "shape": "ConflictException" } ], - "documentation": "Transcribes the audio from a customer service call and applies any additional Request Parameters you choose to include in your request.
In addition to many of the standard transcription features, Call Analytics provides you with call characteristics, call summarization, speaker sentiment, and optional redaction of your text transcript and your audio file. You can also apply custom categories to flag specified conditions. To learn more about these features and insights, refer to Analyzing call center audio with Call Analytics.
If you want to apply categories to your Call Analytics job, you must create them before submitting your job request. Categories cannot be retroactively applied to a job. To create a new category, use the operation. To learn more about Call Analytics categories, see Creating categories.
To make a StartCallAnalyticsJob request, you must first upload your media file into an Amazon S3 bucket; you can then specify the Amazon S3 location of the file using the Media parameter.
You must include the following parameters in your StartCallAnalyticsJob request:
region: The Amazon Web Services Region where you are making your request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and quotas.
CallAnalyticsJobName: A custom name you create for your transcription job that is unique within your Amazon Web Services account.
DataAccessRoleArn: The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files.
Media (MediaFileUri or RedactedMediaFileUri): The Amazon S3 location of your media file.
With Call Analytics, you can redact the audio contained in your media file by including RedactedMediaFileUri, instead of MediaFileUri, to specify the location of your input audio. If you choose to redact your audio, you can find your redacted media at the location specified in the RedactedMediaFileUri field of your response.
Transcribes the audio from a customer service call and applies any additional Request Parameters you choose to include in your request.
In addition to many standard transcription features, Call Analytics provides you with call characteristics, call summarization, speaker sentiment, and optional redaction of your text transcript and your audio file. You can also apply custom categories to flag specified conditions. To learn more about these features and insights, refer to Analyzing call center audio with Call Analytics.
If you want to apply categories to your Call Analytics job, you must create them before submitting your job request. Categories cannot be retroactively applied to a job. To create a new category, use the operation. To learn more about Call Analytics categories, see Creating categories for batch transcriptions and Creating categories for streaming transcriptions.
To make a StartCallAnalyticsJob request, you must first upload your media file into an Amazon S3 bucket; you can then specify the Amazon S3 location of the file using the Media parameter.
You must include the following parameters in your StartCallAnalyticsJob request:
region: The Amazon Web Services Region where you are making your request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and quotas.
CallAnalyticsJobName: A custom name that you create for your transcription job that's unique within your Amazon Web Services account.
DataAccessRoleArn: The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files.
Media (MediaFileUri or RedactedMediaFileUri): The Amazon S3 location of your media file.
With Call Analytics, you can redact the audio contained in your media file by including RedactedMediaFileUri, instead of MediaFileUri, to specify the location of your input audio. If you choose to redact your audio, you can find your redacted media at the location specified in the RedactedMediaFileUri field of your response.
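A hedged sketch of a StartCallAnalyticsJob request built from the parameters listed above; the job name, role ARN, bucket, and channel mapping are illustrative placeholders:

```javascript
// Sketch only: job name, role ARN, bucket, and channel mapping are placeholders.
const AWS = require('aws-sdk');
const transcribe = new AWS.TranscribeService({ region: 'us-east-1' }); // region: where the request is made

transcribe.startCallAnalyticsJob({
  CallAnalyticsJobName: 'my-call-analytics-job',               // must be unique within your account
  DataAccessRoleArn: 'arn:aws:iam::111122223333:role/Admin',   // IAM role with access to the input bucket
  Media: { MediaFileUri: 's3://amzn-s3-demo-bucket/call.wav' }, // or RedactedMediaFileUri to redact audio
  ChannelDefinitions: [                                         // optional: who is on which channel
    { ChannelId: 0, ParticipantRole: 'AGENT' },
    { ChannelId: 1, ParticipantRole: 'CUSTOMER' }
  ]
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Status:', data.CallAnalyticsJob.CallAnalyticsJobStatus);
});
```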
Transcribes the audio from a medical dictation or conversation and applies any additional Request Parameters you choose to include in your request.
In addition to many of the standard transcription features, Amazon Transcribe Medical provides you with a robust medical vocabulary and, optionally, content identification, which adds flags to personal health information (PHI). To learn more about these features, refer to How Amazon Transcribe Medical works.
To make a StartMedicalTranscriptionJob request, you must first upload your media file into an Amazon S3 bucket; you can then specify the S3 location of the file using the Media parameter.
You must include the following parameters in your StartMedicalTranscriptionJob request:
region: The Amazon Web Services Region where you are making your request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and quotas.
MedicalTranscriptionJobName: A custom name you create for your transcription job that is unique within your Amazon Web Services account.
Media (MediaFileUri): The Amazon S3 location of your media file.
LanguageCode: This must be en-US.
OutputBucketName: The Amazon S3 bucket where you want your transcript stored. If you want your output stored in a sub-folder of this bucket, you must also include OutputKey.
Specialty: This must be PRIMARYCARE.
Type: Choose whether your audio is a conversation or a dictation.
Transcribes the audio from a medical dictation or conversation and applies any additional Request Parameters you choose to include in your request.
In addition to many standard transcription features, Amazon Transcribe Medical provides you with a robust medical vocabulary and, optionally, content identification, which adds flags to personal health information (PHI). To learn more about these features, refer to How Amazon Transcribe Medical works.
To make a StartMedicalTranscriptionJob request, you must first upload your media file into an Amazon S3 bucket; you can then specify the S3 location of the file using the Media parameter.
You must include the following parameters in your StartMedicalTranscriptionJob request:
region: The Amazon Web Services Region where you are making your request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and quotas.
MedicalTranscriptionJobName: A custom name you create for your transcription job that is unique within your Amazon Web Services account.
Media (MediaFileUri): The Amazon S3 location of your media file.
LanguageCode: This must be en-US.
OutputBucketName: The Amazon S3 bucket where you want your transcript stored. If you want your output stored in a sub-folder of this bucket, you must also include OutputKey.
Specialty: This must be PRIMARYCARE.
Type: Choose whether your audio is a conversation or a dictation.
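A minimal sketch of a StartMedicalTranscriptionJob request using the required parameters listed above; the job name and bucket names are placeholders:

```javascript
// Sketch only: job name and bucket names are placeholders.
const AWS = require('aws-sdk');
const transcribe = new AWS.TranscribeService({ region: 'us-east-1' }); // region: where the request is made

transcribe.startMedicalTranscriptionJob({
  MedicalTranscriptionJobName: 'my-medical-job',                     // unique within your account
  LanguageCode: 'en-US',                                             // must be en-US
  Media: { MediaFileUri: 's3://amzn-s3-demo-bucket/dictation.wav' }, // S3 location of your media file
  OutputBucketName: 'amzn-s3-demo-bucket',                           // where the transcript is stored
  Specialty: 'PRIMARYCARE',                                          // must be PRIMARYCARE
  Type: 'DICTATION'                                                  // or 'CONVERSATION'
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Status:', data.MedicalTranscriptionJob.TranscriptionJobStatus);
});
```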
Updates an existing custom medical vocabulary with new values. This operation overwrites all existing information with your new values; you cannot append new terms onto an existing vocabulary.
" + "documentation": "Updates an existing custom medical vocabulary with new values. This operation overwrites all existing information with your new values; you cannot append new terms onto an existing custom vocabulary.
" }, "UpdateVocabulary": { "name": "UpdateVocabulary", @@ -1036,7 +1036,7 @@ "shape": "ConflictException" } ], - "documentation": "Updates an existing custom vocabulary with new values. This operation overwrites all existing information with your new values; you cannot append new terms onto an existing vocabulary.
" + "documentation": "Updates an existing custom vocabulary with new values. This operation overwrites all existing information with your new values; you cannot append new terms onto an existing custom vocabulary.
" }, "UpdateVocabularyFilter": { "name": "UpdateVocabularyFilter", @@ -1064,7 +1064,7 @@ "shape": "NotFoundException" } ], - "documentation": "Updates an existing custom vocabulary filter with a new list of words. The new list you provide overwrites all previous entries; you cannot append new terms onto an existing vocabulary filter.
" + "documentation": "Updates an existing custom vocabulary filter with a new list of words. The new list you provide overwrites all previous entries; you cannot append new terms onto an existing custom vocabulary filter.
" } }, "shapes": { @@ -1081,14 +1081,14 @@ }, "First": { "shape": "TimestampMilliseconds", - "documentation": "The time, in milliseconds, from the start of your media file until the value you specify in which Amazon Transcribe searches for your specified criteria.
" + "documentation": "The time, in milliseconds, from the start of your media file until the specified value. Amazon Transcribe searches for your specified criteria in this time segment.
" }, "Last": { "shape": "TimestampMilliseconds", - "documentation": "The time, in milliseconds, from the value you specify until the end of your media file in which Amazon Transcribe searches for your specified criteria.
" + "documentation": "The time, in milliseconds, from the specified value until the end of your media file. Amazon Transcribe searches for your specified criteria in this time segment.
" } }, - "documentation": "A time range, in milliseconds, between two points in your media file.
You can use StartTime
and EndTime
to search a custom segment. For example, setting StartTime
to 10000 and EndTime
to 50000 only searches for your specified criteria in the audio contained between the 10,000 millisecond mark and the 50,000 millisecond mark of your media file. You must use StartTime
and EndTime
as a set; that is, if you include one, you must include both.
You can use also First
to search from the start of the audio until the time you specify, or Last
to search from the time you specify until the end of the audio. For example, setting First
to 50000 only searches for your specified criteria in the audio contained between the start of the media file to the 50,000 millisecond mark. You can use First
and Last
independently of each other.
If you prefer to use percentage instead of milliseconds, see .
" + "documentation": "A time range, in milliseconds, between two points in your media file.
You can use StartTime
and EndTime
to search a custom segment. For example, setting StartTime
to 10000 and EndTime
to 50000 only searches for your specified criteria in the audio contained between the 10,000 millisecond mark and the 50,000 millisecond mark of your media file. You must use StartTime
and EndTime
as a set; that is, if you include one, you must include both.
You can use also First
to search from the start of the audio until the time that you specify, or Last
to search from the time that you specify until the end of the audio. For example, setting First
to 50000 only searches for your specified criteria in the audio contained between the start of the media file to the 50,000 millisecond mark. You can use First
and Last
independently of each other.
If you prefer to use percentage instead of milliseconds, see .
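To make the millisecond ranges concrete, here is a hedged sketch of an AbsoluteTimeRange used inside a Call Analytics rule; the sentiment rule itself is assumed purely for illustration:

```javascript
// Sketch only: a rule fragment (for a CreateCallAnalyticsCategory request) that looks
// for negative customer sentiment between the 10,000 ms and 50,000 ms marks.
const sentimentRule = {
  SentimentFilter: {
    Sentiments: ['NEGATIVE'],
    ParticipantRole: 'CUSTOMER',
    AbsoluteTimeRange: { StartTime: 10000, EndTime: 50000 } // StartTime and EndTime are used as a set
  }
};

// First and Last can be used independently, e.g. only the first 50 seconds of audio:
const firstFiftySeconds = { First: 50000 };
```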
" }, "BaseModelName": { "type": "string", @@ -1127,14 +1127,15 @@ }, "MediaSampleRateHertz": { "shape": "MediaSampleRateHertz", - "documentation": "The sample rate, in Hertz, of the audio track in your input media file.
" + "documentation": "The sample rate, in hertz, of the audio track in your input media file.
" }, "MediaFormat": { "shape": "MediaFormat", "documentation": "The format of the input media file.
" }, "Media": { - "shape": "Media" + "shape": "Media", + "documentation": "Provides the Amazon S3 location of the media file you used in your Call Analytics request.
" }, "Transcript": { "shape": "Transcript" @@ -1153,11 +1154,11 @@ }, "FailureReason": { "shape": "FailureReason", - "documentation": "If CallAnalyticsJobStatus
is FAILED
, FailureReason
contains information about why the Call Analytics job request failed.
The FailureReason
field contains one of the following values:
Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to MediaFormat for a list of supported formats.
The media format provided does not match the detected media format
.
The media format specified in MediaFormat
doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid. The sample rate must be between 8,000 and 48,000 Hertz.
The sample rate provided does not match the detected sample rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
If CallAnalyticsJobStatus
is FAILED
, FailureReason
contains information about why the Call Analytics job request failed.
The FailureReason
field contains one of the following values:
Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to MediaFormat for a list of supported formats.
The media format provided does not match the detected media format
.
The media format specified in MediaFormat
doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid. The sample rate must be between 8,000 and 48,000 hertz.
The sample rate provided does not match the detected sample rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path
. For example: arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM ARNs.
" + "documentation": "The Amazon Resource Name (ARN) you included in your request.
" }, "IdentifiedLanguageScore": { "shape": "IdentifiedLanguageScore", @@ -1165,11 +1166,11 @@ }, "Settings": { "shape": "CallAnalyticsJobSettings", - "documentation": "Allows additional optional settings in your request, including content redaction; allows you to apply custom language models, vocabulary filters, and custom vocabularies to your Call Analytics job.
" + "documentation": "Provides information on any additional settings that were included in your request. Additional settings include content redaction and language identification settings.
" }, "ChannelDefinitions": { "shape": "ChannelDefinitions", - "documentation": "Allows you to specify which speaker is on which channel in your Call Analytics job request. For example, if your agent is the first participant to speak, you would set ChannelId
to 0
(to indicate the first channel) and ParticipantRole
to AGENT
(to indicate that it's the agent speaking).
Indicates which speaker is on which channel.
" } }, "documentation": "Provides detailed information about a Call Analytics job.
To view the job's status, refer to CallAnalyticsJobStatus
. If the status is COMPLETED
, the job is finished. You can find your completed transcript at the URI specified in TranscriptFileUri
. If the status is FAILED
, FailureReason
provides details on why your transcription job failed.
If you enabled personally identifiable information (PII) redaction, the redacted transcript appears at the location specified in RedactedTranscriptFileUri
.
If you chose to redact the audio in your media file, you can find your redacted media file at the location specified in the RedactedMediaFileUri
field of your response.
The name of the custom vocabulary you want to include in your Call Analytics transcription request. Vocabulary names are case sensitive.
" + "documentation": "The name of the custom vocabulary you want to include in your Call Analytics transcription request. Custom vocabulary names are case sensitive.
" }, "VocabularyFilterName": { "shape": "VocabularyFilterName", - "documentation": "The name of the custom vocabulary filter you want to include in your Call Analytics transcription request. Vocabulary filter names are case sensitive.
Note that if you include VocabularyFilterName
in your request, you must also include VocabularyFilterMethod
.
The name of the custom vocabulary filter you want to include in your Call Analytics transcription request. Custom vocabulary filter names are case sensitive.
Note that if you include VocabularyFilterName
in your request, you must also include VocabularyFilterMethod
.
Specify how you want your vocabulary filter applied to your transcript.
To replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
Specify how you want your custom vocabulary filter applied to your transcript.
To replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
The name of the custom language model you want to use when processing your Call Analytics job. Note that language model names are case sensitive.
The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the language model isn't applied. There are no errors or warnings associated with a language mismatch.
" + "documentation": "The name of the custom language model you want to use when processing your Call Analytics job. Note that custom language model names are case sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
" }, "ContentRedaction": { "shape": "ContentRedaction" }, "LanguageOptions": { "shape": "LanguageOptions", - "documentation": "You can specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.
Including language options can improve the accuracy of language identification.
For a list of languages supported with Call Analytics, refer to the Supported languages table.
" + "documentation": "You can specify two or more language codes that represent the languages you think may be present in your media. Including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.
Including language options can improve the accuracy of language identification.
For a list of languages supported with Call Analytics, refer to the Supported languages table.
To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample rate of 16,000 Hz or higher.
If using automatic language identification (IdentifyLanguage
) in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
).
You can specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The languages you specify must match the languages of the specified custom language models, custom vocabularies, and custom vocabulary filters.
To include language options using IdentifyLanguage
without including a custom language model, a custom vocabulary, or a custom vocabulary filter, use LanguageOptions
instead of LanguageIdSettings
. Including language options can improve the accuracy of automatic language identification.
If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the parameter with the
LanguageModelName
sub-parameter.
If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the parameter with the
VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
If using automatic language identification in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
).
LanguageIdSettings
supports two to five language codes. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you specify must match the languages of the associated custom language models, custom vocabularies, and custom vocabulary filters.
It's recommended that you include LanguageOptions
when using LanguageIdSettings
to ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is in en-US
but Amazon Transcribe determines that the language spoken in your media is en-AU
, your custom vocabulary is not applied to your transcription. If you include LanguageOptions
and include en-US
as the only English language dialect, your custom vocabulary is applied to your transcription.
If you want to include a custom language model, custom vocabulary, or custom vocabulary filter with your request but do not want to use automatic language identification, use instead the parameter with the
LanguageModelName
, VocabularyName
, or VocabularyFilterName
sub-parameters.
For a list of languages supported with Call Analytics, refer to Supported languages and language-specific features.
" } }, - "documentation": "Provides additional optional settings for your request, including content redaction, automatic language identification; allows you to apply custom language models, vocabulary filters, and custom vocabularies.
" + "documentation": "Provides additional optional settings for your request, including content redaction, automatic language identification; allows you to apply custom language models, custom vocabulary filters, and custom vocabularies.
" }, "CallAnalyticsJobStatus": { "type": "string", @@ -1286,6 +1287,10 @@ "LastUpdateTime": { "shape": "DateTime", "documentation": "The date and time the specified Call Analytics category was last updated.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-05T12:45:32.691000-07:00
represents 12:45 PM UTC-7 on May 5, 2022.
The input type associated with the specified category. POST_CALL
refers to a category that is applied to batch transcriptions; REAL_TIME
refers to a category that is applied to streaming transcriptions.
Provides you with the properties of the Call Analytics category you specified in your request. This includes the list of rules that define the specified category.
" @@ -1308,7 +1313,7 @@ "documentation": "Specify the speaker you want to define. Omitting this parameter is equivalent to specifying both participants.
" } }, - "documentation": "Allows you to specify which speaker is on which channel. For example, if your agent is the first participant to speak, you would set ChannelId
to 0
(to indicate the first channel) and ParticipantRole
to AGENT
(to indicate that it's the agent speaking).
Makes it possible to specify which speaker is on which channel. For example, if your agent is the first participant to speak, you would set ChannelId
to 0
(to indicate the first channel) and ParticipantRole
to AGENT
(to indicate that it's the agent speaking).
Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL
.
Allows you to redact or flag specified personally identifiable information (PII) in your transcript. If you use ContentRedaction
, you must also include the sub-parameters: PiiEntityTypes
, RedactionOutput
, and RedactionType
.
Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If you use ContentRedaction
, you must also include the sub-parameters: PiiEntityTypes
, RedactionOutput
, and RedactionType
.
Rules define a Call Analytics category. When creating a new Call Analytics category, you must create between 1 and 20 rules for that category. For each rule, you specify a filter you want applied to the attributes of a call. For example, you can choose a sentiment filter that detects if a customer's sentiment was positive during the last 30 seconds of the call.
" + "documentation": "Rules define a Call Analytics category. When creating a new category, you must create between 1 and 20 rules for that category. For each rule, you specify a filter you want applied to the attributes of a call. For example, you can choose a sentiment filter that detects if a customer's sentiment was positive during the last 30 seconds of the call.
" + }, + "InputType": { + "shape": "InputType", + "documentation": "Choose whether you want to create a streaming or a batch category for your Call Analytics transcription.
Specifying POST_CALL assigns your category to batch transcriptions; categories with this input type cannot be applied to streaming (real-time) transcriptions.
Specifying REAL_TIME assigns your category to streaming transcriptions; categories with this input type cannot be applied to batch (post-call) transcriptions.
If you do not include InputType, your category is created as a batch category by default.
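A rough sketch of the new InputType field at category-creation time; the category name and interruption rule are illustrative only:

```javascript
// Sketch only: the category name and rule are illustrative.
const AWS = require('aws-sdk');
const transcribe = new AWS.TranscribeService({ region: 'us-east-1' });

transcribe.createCallAnalyticsCategory({
  CategoryName: 'agent-interruptions',
  InputType: 'POST_CALL', // batch (post-call) category; use REAL_TIME for streaming transcriptions
  Rules: [
    {
      // Flag calls where the agent interrupts for more than 10,000 ms in total
      InterruptionFilter: { ParticipantRole: 'AGENT', Threshold: 10000 }
    }
  ]
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Created:', data.CategoryProperties.CategoryName);
});
```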
The language code that represents the language of your model. Each language model must contain terms in only one language, and the language you select for your model must match the language of your training and tuning data.
For a list of supported languages and their associated language codes, refer to the Supported languages table. Note that U.S. English (en-US
) is the only language supported with Amazon Transcribe Medical.
A custom language model can only be used to transcribe files in the same language as the model. For example, if you create a language model using US English (en-US
), you can only apply this model to files that contain English audio.
The language code that represents the language of your model. Each custom language model must contain terms in only one language, and the language you select for your custom language model must match the language of your training and tuning data.
For a list of supported languages and their associated language codes, refer to the Supported languages table. Note that US English (en-US
) is the only language supported with Amazon Transcribe Medical.
A custom language model can only be used to transcribe files in the same language as the model. For example, if you create a custom language model using US English (en-US
), you can only apply this model to files that contain English audio.
A unique name, chosen by you, for your custom language model.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new language model with the same name as an existing language model, you get a ConflictException
error.
A unique name, chosen by you, for your custom language model.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new custom language model with the same name as an existing custom language model, you get a ConflictException
error.
A unique name, chosen by you, for your new custom medical vocabulary.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new medical vocabulary with the same name as an existing medical vocabulary, you get a ConflictException
error.
A unique name, chosen by you, for your new custom medical vocabulary.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new custom medical vocabulary with the same name as an existing custom medical vocabulary, you get a ConflictException
error.
Adds one or more custom tags, each in the form of a key:value pair, to a new medical vocabulary at the time you create this new vocabulary.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" + "documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom medical vocabulary at the time you create this new custom vocabulary.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" } } }, @@ -1462,11 +1471,11 @@ }, "LanguageCode": { "shape": "LanguageCode", - "documentation": "The language code you selected for your medical vocabulary. US English (en-US
) is the only language supported with Amazon Transcribe Medical.
The language code you selected for your custom medical vocabulary. US English (en-US
) is the only language supported with Amazon Transcribe Medical.
The processing state of your custom medical vocabulary. If the state is READY
, you can use the vocabulary in a StartMedicalTranscriptionJob
request.
The processing state of your custom medical vocabulary. If the state is READY
, you can use the custom vocabulary in a StartMedicalTranscriptionJob
request.
A unique name, chosen by you, for your new custom vocabulary filter.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new vocabulary filter with the same name as an existing vocabulary filter, you get a ConflictException
error.
A unique name, chosen by you, for your new custom vocabulary filter.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new custom vocabulary filter with the same name as an existing custom vocabulary filter, you get a ConflictException
error.
The language code that represents the language of the entries in your vocabulary filter. Each vocabulary filter must contain terms in only one language.
A vocabulary filter can only be used to transcribe files in the same language as the filter. For example, if you create a vocabulary filter using US English (en-US
), you can only apply this filter to files that contain English audio.
For a list of supported languages and their associated language codes, refer to the Supported languages table.
" + "documentation": "The language code that represents the language of the entries in your vocabulary filter. Each custom vocabulary filter must contain terms in only one language.
A custom vocabulary filter can only be used to transcribe files in the same language as the filter. For example, if you create a custom vocabulary filter using US English (en-US
), you can only apply this filter to files that contain English audio.
For a list of supported languages and their associated language codes, refer to the Supported languages table.
" }, "Words": { "shape": "Words", - "documentation": "Use this parameter if you want to create your vocabulary filter by including all desired terms, as comma-separated values, within your request. The other option for creating your vocabulary filter is to save your entries in a text file and upload them to an Amazon S3 bucket, then specify the location of your file using the VocabularyFilterFileUri
parameter.
Note that if you include Words
in your request, you cannot use VocabularyFilterFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
" + "documentation": "Use this parameter if you want to create your custom vocabulary filter by including all desired terms, as comma-separated values, within your request. The other option for creating your vocabulary filter is to save your entries in a text file and upload them to an Amazon S3 bucket, then specify the location of your file using the VocabularyFilterFileUri
parameter.
Note that if you include Words
in your request, you cannot use VocabularyFilterFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your custom vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
" }, "VocabularyFilterFileUri": { "shape": "Uri", @@ -1503,7 +1512,7 @@ }, "Tags": { "shape": "TagList", - "documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom vocabulary filter at the time you create this new filter.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" + "documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom vocabulary filter at the time you create this new vocabulary filter.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" } } }, @@ -1516,11 +1525,11 @@ }, "LanguageCode": { "shape": "LanguageCode", - "documentation": "The language code you selected for your vocabulary filter.
" + "documentation": "The language code you selected for your custom vocabulary filter.
" }, "LastModifiedTime": { "shape": "DateTime", - "documentation": "The date and time you created your vocabulary filter.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The date and time you created your custom vocabulary filter.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
A unique name, chosen by you, for your new custom vocabulary.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new vocabulary with the same name as an existing vocabulary, you get a ConflictException
error.
A unique name, chosen by you, for your new custom vocabulary.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new custom vocabulary with the same name as an existing custom vocabulary, you get a ConflictException
error.
The language code that represents the language of the entries in your custom vocabulary. Each vocabulary must contain terms in only one language.
A custom vocabulary can only be used to transcribe files in the same language as the vocabulary. For example, if you create a vocabulary using US English (en-US
), you can only apply this vocabulary to files that contain English audio.
For a list of supported languages and their associated language codes, refer to the Supported languages table.
" + "documentation": "The language code that represents the language of the entries in your custom vocabulary. Each custom vocabulary must contain terms in only one language.
A custom vocabulary can only be used to transcribe files in the same language as the custom vocabulary. For example, if you create a custom vocabulary using US English (en-US
), you can only apply this custom vocabulary to files that contain English audio.
For a list of supported languages and their associated language codes, refer to the Supported languages table.
" }, "Phrases": { "shape": "Phrases", - "documentation": "Use this parameter if you want to create your vocabulary by including all desired terms, as comma-separated values, within your request. The other option for creating your vocabulary is to save your entries in a text file and upload them to an Amazon S3 bucket, then specify the location of your file using the VocabularyFileUri
parameter.
Note that if you include Phrases
in your request, you cannot use VocabularyFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
" + "documentation": "Use this parameter if you want to create your custom vocabulary by including all desired terms, as comma-separated values, within your request. The other option for creating your custom vocabulary is to save your entries in a text file and upload them to an Amazon S3 bucket, then specify the location of your file using the VocabularyFileUri
parameter.
Note that if you include Phrases
in your request, you cannot use VocabularyFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your custom vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language.
" }, "VocabularyFileUri": { "shape": "Uri", @@ -1549,7 +1558,7 @@ }, "Tags": { "shape": "TagList", - "documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom vocabulary at the time you create this new vocabulary.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" + "documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom vocabulary at the time you create this new custom vocabulary.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" } } }, @@ -1566,7 +1575,7 @@ }, "VocabularyState": { "shape": "VocabularyState", - "documentation": "The processing state of your custom vocabulary. If the state is READY
, you can use the vocabulary in a StartTranscriptionJob
request.
The processing state of your custom vocabulary. If the state is READY
, you can use the custom vocabulary in a StartTranscriptionJob
request.
If VocabularyState
is FAILED
, FailureReason
contains information about why the vocabulary request failed. See also: Common Errors.
If VocabularyState
is FAILED
, FailureReason
contains information about why the custom vocabulary request failed. See also: Common Errors.
The name of the custom medical vocabulary you want to delete. Vocabulary names are case sensitive.
" + "documentation": "The name of the custom medical vocabulary you want to delete. Custom medical vocabulary names are case sensitive.
" } } }, @@ -1675,7 +1684,7 @@ "members": { "VocabularyFilterName": { "shape": "VocabularyFilterName", - "documentation": "The name of the custom vocabulary filter you want to delete. Vocabulary filter names are case sensitive.
" + "documentation": "The name of the custom vocabulary filter you want to delete. Custom vocabulary filter names are case sensitive.
" } } }, @@ -1687,7 +1696,7 @@ "members": { "VocabularyName": { "shape": "VocabularyName", - "documentation": "The name of the custom vocabulary you want to delete. Vocabulary names are case sensitive.
" + "documentation": "The name of the custom vocabulary you want to delete. Custom vocabulary names are case sensitive.
" } } }, @@ -1789,7 +1798,7 @@ "members": { "VocabularyName": { "shape": "VocabularyName", - "documentation": "The name of the custom medical vocabulary you want information about. Vocabulary names are case sensitive.
" + "documentation": "The name of the custom medical vocabulary you want information about. Custom medical vocabulary names are case sensitive.
" } } }, @@ -1802,11 +1811,11 @@ }, "LanguageCode": { "shape": "LanguageCode", - "documentation": "The language code you selected for your medical vocabulary. US English (en-US
) is the only language supported with Amazon Transcribe Medical.
The language code you selected for your custom medical vocabulary. US English (en-US
) is the only language supported with Amazon Transcribe Medical.
The processing state of your custom medical vocabulary. If the state is READY
, you can use the vocabulary in a StartMedicalTranscriptionJob
request.
The processing state of your custom medical vocabulary. If the state is READY
, you can use the custom vocabulary in a StartMedicalTranscriptionJob
request.
If VocabularyState
is FAILED
, FailureReason
contains information about why the medical vocabulary request failed. See also: Common Errors.
If VocabularyState
is FAILED
, FailureReason
contains information about why the custom medical vocabulary request failed. See also: Common Errors.
The S3 location where the specified medical vocabulary is stored; use this URI to view or download the vocabulary.
" + "documentation": "The S3 location where the specified custom medical vocabulary is stored; use this URI to view or download the custom vocabulary.
" } } }, @@ -1851,7 +1860,7 @@ "members": { "VocabularyFilterName": { "shape": "VocabularyFilterName", - "documentation": "The name of the custom vocabulary filter you want information about. Vocabulary filter names are case sensitive.
" + "documentation": "The name of the custom vocabulary filter you want information about. Custom vocabulary filter names are case sensitive.
" } } }, @@ -1864,15 +1873,15 @@ }, "LanguageCode": { "shape": "LanguageCode", - "documentation": "The language code you selected for your vocabulary filter.
" + "documentation": "The language code you selected for your custom vocabulary filter.
" }, "LastModifiedTime": { "shape": "DateTime", - "documentation": "The date and time the specified vocabulary filter was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified custom vocabulary filter was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The Amazon S3 location where the vocabulary filter is stored; use this URI to view or download the vocabulary filter.
" + "documentation": "The Amazon S3 location where the custom vocabulary filter is stored; use this URI to view or download the custom vocabulary filter.
" } } }, @@ -1884,7 +1893,7 @@ "members": { "VocabularyName": { "shape": "VocabularyName", - "documentation": "The name of the custom vocabulary you want information about. Vocabulary names are case sensitive.
" + "documentation": "The name of the custom vocabulary you want information about. Custom vocabulary names are case sensitive.
" } } }, @@ -1901,19 +1910,19 @@ }, "VocabularyState": { "shape": "VocabularyState", - "documentation": "The processing state of your custom vocabulary. If the state is READY
, you can use the vocabulary in a StartTranscriptionJob
request.
The processing state of your custom vocabulary. If the state is READY
, you can use the custom vocabulary in a StartTranscriptionJob
request.
The date and time the specified vocabulary was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified custom vocabulary was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
If VocabularyState
is FAILED
, FailureReason
contains information about why the vocabulary request failed. See also: Common Errors.
If VocabularyState
is FAILED
, FailureReason
contains information about why the custom vocabulary request failed. See also: Common Errors.
The S3 location where the vocabulary is stored; use this URI to view or download the vocabulary.
" + "documentation": "The S3 location where the custom vocabulary is stored; use this URI to view or download the custom vocabulary.
" } } }, @@ -1937,50 +1946,57 @@ }, "DataAccessRoleArn": { "shape": "DataAccessRoleArn", - "documentation": "The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path
. For example: arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM ARNs.
" + "documentation": "The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role that you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path
. For example: arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM ARNs.
" } }, "documentation": "Contains the Amazon S3 location of the training data you want to use to create a new custom language model, and permissions to access this location.
When using InputDataConfig
, you must include these sub-parameters: S3Uri
and DataAccessRoleArn
. You can optionally include TuningDataS3Uri
.
Specify the duration of the interruptions in milliseconds. For example, you can flag speech that contains more than 10000 milliseconds of interruptions.
" + "documentation": "Specify the duration of the interruptions in milliseconds. For example, you can flag speech that contains more than 10,000 milliseconds of interruptions.
" }, "ParticipantRole": { "shape": "ParticipantRole", - "documentation": "Specify the interrupter you want to flag. Omitting this parameter is equivalent to specifying both participants.
" + "documentation": "Specify the interrupter that you want to flag. Omitting this parameter is equivalent to specifying both participants.
" }, "AbsoluteTimeRange": { "shape": "AbsoluteTimeRange", - "documentation": "Allows you to specify a time range (in milliseconds) in your audio, during which you want to search for an interruption. See for more detail.
" + "documentation": "Makes it possible to specify a time range (in milliseconds) in your audio, during which you want to search for an interruption. See for more detail.
" }, "RelativeTimeRange": { "shape": "RelativeTimeRange", - "documentation": "Allows you to specify a time range (in percentage) in your media file, during which you want to search for an interruption. See for more detail.
" + "documentation": "Makes it possible to specify a time range (in percentage) in your media file, during which you want to search for an interruption. See for more detail.
" }, "Negate": { "shape": "Boolean", "documentation": "Set to TRUE
to flag speech that does not contain interruptions. Set to FALSE
to flag speech that contains interruptions.
Flag the presence or absence of interruptions in your Call Analytics transcription output.
Rules using InterruptionFilter
are designed to match:
Instances where an agent interrupts a customer
Instances where a customer interrupts an agent
Either participant interrupting the other
A lack of interruptions
See Rule criteria for usage examples.
" + "documentation": "Flag the presence or absence of interruptions in your Call Analytics transcription output.
Rules using InterruptionFilter
are designed to match:
Instances where an agent interrupts a customer
Instances where a customer interrupts an agent
Either participant interrupting the other
A lack of interruptions
See Rule criteria for batch categories for usage examples.
" }, "JobExecutionSettings": { "type": "structure", "members": { "AllowDeferredExecution": { "shape": "Boolean", - "documentation": "Allows you to enable job queuing when your concurrent request limit is exceeded. When AllowDeferredExecution
is set to true
, transcription job requests are placed in a queue until the number of jobs falls below the concurrent request limit. If AllowDeferredExecution
is set to false
and the number of transcription job requests exceed the concurrent request limit, you get a LimitExceededException
error.
Note that job queuing is enabled by default for Call Analytics jobs.
If you include AllowDeferredExecution
in your request, you must also include DataAccessRoleArn
.
Makes it possible to enable job queuing when your concurrent request limit is exceeded. When AllowDeferredExecution
is set to true
, transcription job requests are placed in a queue until the number of jobs falls below the concurrent request limit. If AllowDeferredExecution
is set to false
and the number of transcription job requests exceed the concurrent request limit, you get a LimitExceededException
error.
Note that job queuing is enabled by default for Call Analytics jobs.
If you include AllowDeferredExecution
in your request, you must also include DataAccessRoleArn
.
The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path
. For example: arn:aws:iam::111122223333:role/Admin
. For more information, see IAM ARNs.
Note that if you include DataAccessRoleArn
in your request, you must also include AllowDeferredExecution
.
The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role that you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path
. For example: arn:aws:iam::111122223333:role/Admin
. For more information, see IAM ARNs.
Note that if you include DataAccessRoleArn
in your request, you must also include AllowDeferredExecution
.
Allows you to control how your transcription job is processed. Currently, the only JobExecutionSettings
modification you can choose is enabling job queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also include the sub-parameters: AllowDeferredExecution
and DataAccessRoleArn
.
Makes it possible to control how your transcription job is processed. Currently, the only JobExecutionSettings
modification you can choose is enabling job queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also include the sub-parameters: AllowDeferredExecution
and DataAccessRoleArn
.
The name of the custom vocabulary you want to use when processing your transcription job. Vocabulary names are case sensitive.
The language of the specified vocabulary must match the language code you specify in your transcription request. If the languages don't match, the vocabulary isn't applied. There are no errors or warnings associated with a language mismatch.
" + "documentation": "The name of the custom vocabulary you want to use when processing your transcription job. Custom vocabulary names are case sensitive.
The language of the specified custom vocabulary must match the language code that you specify in your transcription request. If the languages don't match, the custom vocabulary isn't applied. There are no errors or warnings associated with a language mismatch.
" }, "VocabularyFilterName": { "shape": "VocabularyFilterName", - "documentation": "The name of the custom vocabulary filter you want to use when processing your transcription job. Vocabulary filter names are case sensitive.
The language of the specified vocabulary filter must match the language code you specify in your transcription request. If the languages don't match, the vocabulary filter isn't applied. There are no errors or warnings associated with a language mismatch.
Note that if you include VocabularyFilterName
in your request, you must also include VocabularyFilterMethod
.
The name of the custom vocabulary filter you want to use when processing your transcription job. Custom vocabulary filter names are case sensitive.
The language of the specified custom vocabulary filter must match the language code that you specify in your transcription request. If the languages don't match, the custom vocabulary filter isn't applied. There are no errors or warnings associated with a language mismatch.
Note that if you include VocabularyFilterName
in your request, you must also include VocabularyFilterMethod
.
The name of the custom language model you want to use when processing your transcription job. Note that language model names are case sensitive.
The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the language model isn't applied. There are no errors or warnings associated with a language mismatch.
" + "documentation": "The name of the custom language model you want to use when processing your transcription job. Note that custom language model names are case sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
" } }, - "documentation": "If using automatic language identification (IdentifyLanguage
) in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
).
You can specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The languages you specify must match the languages of the specified custom language models, custom vocabularies, and custom vocabulary filters.
To include language options using IdentifyLanguage
without including a custom language model, a custom vocabulary, or a custom vocabulary filter, use LanguageOptions
instead of LanguageIdSettings
. Including language options can improve the accuracy of automatic language identification.
If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the parameter with the
LanguageModelName
sub-parameter.
If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the parameter with the
VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
If using automatic language identification in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
). Note that multi-language identification (IdentifyMultipleLanguages
) doesn't support custom language models.
LanguageIdSettings
supports two to five language codes. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you specify must match the languages of the associated custom language models, custom vocabularies, and custom vocabulary filters.
It's recommended that you include LanguageOptions
when using LanguageIdSettings
to ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is in en-US
but Amazon Transcribe determines that the language spoken in your media is en-AU
, your custom vocabulary is not applied to your transcription. If you include LanguageOptions
and include en-US
as the only English language dialect, your custom vocabulary is applied to your transcription.
If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the parameter with the
LanguageModelName
sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the parameter with the
VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
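As a rough illustration of the combination this documentation describes, here is a minimal AWS SDK for JavaScript (v2) sketch of a language-identification request that attaches per-language custom resources; the job, bucket, vocabulary, filter, and model names are placeholders:

```javascript
// Minimal sketch: IdentifyLanguage with LanguageOptions and LanguageIdSettings.
// All names and the S3 URI below are placeholders.
const AWS = require('aws-sdk');
const transcribeservice = new AWS.TranscribeService({ region: 'us-west-2' });

const params = {
  TranscriptionJobName: 'my-language-id-job',
  IdentifyLanguage: true,
  LanguageOptions: ['en-US', 'de-DE'],          // recommended so the correct dialect is chosen
  LanguageIdSettings: {
    'en-US': { VocabularyName: 'my-en-vocabulary', LanguageModelName: 'my-en-model' },
    'de-DE': { VocabularyFilterName: 'my-de-filter' }
  },
  Media: { MediaFileUri: 's3://DOC-EXAMPLE-BUCKET/my-media-file.flac' }
};

transcribeservice.startTranscriptionJob(params).promise()
  .then(data => console.log(data.TranscriptionJob.TranscriptionJobStatus))
  .catch(console.error);
```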
The date and time the specified language model was last modified. Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified custom language model was last modified. Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022.
The language code used to create your custom language model. Each language model must contain terms in only one language, and the language you select for your model must match the language of your training and tuning data. For a list of supported languages and their associated language codes, refer to the Supported languages table. Note that U.S. English (en-US) is the only language supported with Amazon Transcribe Medical.
The language code used to create your custom language model. Each custom language model must contain terms in only one language, and the language you select for your custom language model must match the language of your training and tuning data. For a list of supported languages and their associated language codes, refer to the Supported languages table. Note that U.S. English (en-US) is the only language supported with Amazon Transcribe Medical.
Shows if a more current base model is available for use with the specified custom language model. If false, your language model is using the most up-to-date base model. If true, there is a newer base model available than the one your language model is using. Note that to update a base model, you must recreate the custom language model using the new base model. Base model upgrades for existing custom language models are not supported.
" + "documentation": "Shows if a more current base model is available for use with the specified custom language model. If false, your custom language model is using the most up-to-date base model. If true, there is a newer base model available than the one your language model is using. Note that to update a base model, you must recreate the custom language model using the new base model. Base model upgrades for existing custom language models are not supported.
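A minimal sketch of how this flag might be checked from the AWS SDK for JavaScript (v2); the model name is a placeholder, and the UpgradeAvailability field name is taken from the service model rather than from this diff:

```javascript
// Minimal sketch: checking whether a newer base model exists for a custom language model.
// The model name below is a placeholder.
const AWS = require('aws-sdk');
const transcribeservice = new AWS.TranscribeService({ region: 'us-west-2' });

transcribeservice.describeLanguageModel({ ModelName: 'my-custom-model' }).promise()
  .then(({ LanguageModel }) => {
    if (LanguageModel.UpgradeAvailability) {
      // A newer base model exists; recreate the custom model to pick it up.
      console.log('Newer base model available for', LanguageModel.ModelName);
    } else {
      console.log('Custom language model is on the latest base model.');
    }
  })
  .catch(console.error);
```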
" }, "FailureReason": { "shape": "FailureReason", @@ -2148,7 +2164,7 @@ }, "MaxResults": { "shape": "MaxResults", - "documentation": "The maximum number of Call Analytics categories to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" + "documentation": "The maximum number of Call Analytics categories to return in each page of results. If there are fewer results than the value that you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" } } }, @@ -2182,7 +2198,7 @@ }, "MaxResults": { "shape": "MaxResults", - "documentation": "The maximum number of Call Analytics jobs to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" + "documentation": "The maximum number of Call Analytics jobs to return in each page of results. If there are fewer results than the value that you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" } } }, @@ -2220,7 +2236,7 @@ }, "MaxResults": { "shape": "MaxResults", - "documentation": "The maximum number of custom language models to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" + "documentation": "The maximum number of custom language models to return in each page of results. If there are fewer results than the value that you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" } } }, @@ -2254,7 +2270,7 @@ }, "MaxResults": { "shape": "MaxResults", - "documentation": "The maximum number of medical transcription jobs to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" + "documentation": "The maximum number of medical transcription jobs to return in each page of results. If there are fewer results than the value that you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" } } }, @@ -2284,11 +2300,11 @@ }, "MaxResults": { "shape": "MaxResults", - "documentation": "The maximum number of custom medical vocabularies to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" + "documentation": "The maximum number of custom medical vocabularies to return in each page of results. If there are fewer results than the value that you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" }, "StateEquals": { "shape": "VocabularyState", - "documentation": "Returns only custom medical vocabularies with the specified state. Vocabularies are ordered by creation date, with the newest vocabulary first. If you don't include StateEquals
, all custom medical vocabularies are returned.
Returns only custom medical vocabularies with the specified state. Custom vocabularies are ordered by creation date, with the newest vocabulary first. If you don't include StateEquals
, all custom medical vocabularies are returned.
Lists all custom medical vocabularies that have the status specified in your request. Vocabularies are ordered by creation date, with the newest vocabulary first.
" + "documentation": "Lists all custom medical vocabularies that have the status specified in your request. Custom vocabularies are ordered by creation date, with the newest vocabulary first.
" }, "NextToken": { "shape": "NextToken", @@ -2321,7 +2337,7 @@ "members": { "ResourceArn": { "shape": "TranscribeArn", - "documentation": "Returns a list of all tags associated with the specified Amazon Resource Name (ARN). ARNs have the format arn:partition:service:region:account-id:resource-type/resource-id
.
For example, arn:aws:transcribe:us-west-2:account-id:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
, medical-transcription-job
, vocabulary
, medical-vocabulary
, vocabulary-filter
, and language-model
.
Returns a list of all tags associated with the specified Amazon Resource Name (ARN). ARNs have the format arn:partition:service:region:account-id:resource-type/resource-id
.
For example, arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
, medical-transcription-job
, vocabulary
, medical-vocabulary
, vocabulary-filter
, and language-model
.
The maximum number of transcription jobs to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" + "documentation": "The maximum number of transcription jobs to return in each page of results. If there are fewer results than the value that you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" } } }, @@ -2385,7 +2401,7 @@ }, "MaxResults": { "shape": "MaxResults", - "documentation": "The maximum number of custom vocabularies to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" + "documentation": "The maximum number of custom vocabularies to return in each page of results. If there are fewer results than the value that you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" }, "StateEquals": { "shape": "VocabularyState", @@ -2423,7 +2439,7 @@ }, "MaxResults": { "shape": "MaxResults", - "documentation": "The maximum number of custom vocabulary filters to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" + "documentation": "The maximum number of custom vocabulary filters to return in each page of results. If there are fewer results than the value that you specify, only the actual results are returned. If you don't specify a value, a default of 5 is used.
" }, "NameContains": { "shape": "VocabularyFilterName", @@ -2468,10 +2484,10 @@ }, "RedactedMediaFileUri": { "shape": "Uri", - "documentation": "The Amazon S3 location of the media file you want to redact. For example:
s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
Note that the Amazon S3 bucket that contains your input media must be located in the same Amazon Web Services Region where you're making your transcription request.
RedactedMediaFileUri
is only supported for Call Analytics (StartCallAnalyticsJob
) transcription requests.
The Amazon S3 location of the media file you want to redact. For example:
s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
Note that the Amazon S3 bucket that contains your input media must be located in the same Amazon Web Services Region where you're making your transcription request.
RedactedMediaFileUri
produces a redacted audio file in addition to a redacted transcript. It is only supported for Call Analytics (StartCallAnalyticsJob
) transcription requests.
Describes the Amazon S3 location of the media file you want to use in your request.
" + "documentation": "Describes the Amazon S3 location of the media file you want to use in your request.
For information on supported media formats, refer to the MediaFormat parameter or the Media formats section in the Amazon S3 Developer Guide.
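To make the Media shape concrete, here is a hedged AWS SDK for JavaScript (v2) sketch of a Call Analytics request that supplies both URIs; the job name, role ARN, and S3 locations are placeholders:

```javascript
// Minimal sketch: the Media shape in a Call Analytics request.
// Job name, IAM role ARN, and S3 URIs below are placeholders.
const AWS = require('aws-sdk');
const transcribeservice = new AWS.TranscribeService({ region: 'us-west-2' });

transcribeservice.startCallAnalyticsJob({
  CallAnalyticsJobName: 'my-call-analytics-job',
  DataAccessRoleArn: 'arn:aws:iam::111122223333:role/Admin',
  Media: {
    MediaFileUri: 's3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac',
    // Only meaningful for Call Analytics (StartCallAnalyticsJob) requests.
    RedactedMediaFileUri: 's3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac'
  }
}).promise().then(console.log).catch(console.error);
```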
" }, "MediaFormat": { "type": "string", @@ -2506,7 +2522,7 @@ "members": { "TranscriptFileUri": { "shape": "Uri", - "documentation": "The Amazon S3 location of your transcript. You can use this URI to access or download your transcript.
If you included OutputBucketName in your transcription job request, this is the URI of that bucket. If you also included OutputKey in your request, your output is located in the path you specified in your request. If you didn't include OutputBucketName in your transcription job request, your transcript is stored in a service-managed bucket, and TranscriptFileUri provides you with a temporary URI you can use for secure access to your transcript. Temporary URIs for service-managed Amazon S3 buckets are only valid for 15 minutes. If you get an AccesDenied error, you can get a new temporary URI by running a GetTranscriptionJob or ListTranscriptionJob request.
The Amazon S3 location of your transcript. You can use this URI to access or download your transcript. Note that this is the Amazon S3 location you specified in your request using the OutputBucketName parameter.
Provides you with the Amazon S3 URI you can use to access your transcript.
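A minimal sketch of retrieving that URI after a job finishes, using the AWS SDK for JavaScript (v2); the job name is a placeholder:

```javascript
// Minimal sketch: reading the transcript URI for a completed job.
// The job name below is a placeholder.
const AWS = require('aws-sdk');
const transcribeservice = new AWS.TranscribeService({ region: 'us-west-2' });

transcribeservice.getTranscriptionJob({ TranscriptionJobName: 'my-first-job' }).promise()
  .then(({ TranscriptionJob }) => {
    if (TranscriptionJob.TranscriptionJobStatus === 'COMPLETED') {
      // Fetch or download this URI to read the transcript JSON.
      console.log(TranscriptionJob.Transcript.TranscriptFileUri);
    }
  })
  .catch(console.error);
```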
" @@ -2528,7 +2544,7 @@ }, "MediaSampleRateHertz": { "shape": "MedicalMediaSampleRateHertz", - "documentation": "The sample rate, in Hertz, of the audio track in your input media file.
" + "documentation": "The sample rate, in hertz, of the audio track in your input media file.
" }, "MediaFormat": { "shape": "MediaFormat", @@ -2555,15 +2571,15 @@ }, "FailureReason": { "shape": "FailureReason", - "documentation": "If TranscriptionJobStatus
is FAILED, FailureReason contains information about why the transcription job request failed. The FailureReason field contains one of the following values:
Unsupported media format. The media format specified in MediaFormat isn't valid. Refer to MediaFormat for a list of supported formats.
The media format provided does not match the detected media format. The media format specified in MediaFormat doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
Invalid sample rate for audio file. The sample rate specified in MediaSampleRateHertz isn't valid. The sample rate must be between 16,000 and 48,000 Hertz.
The sample rate provided does not match the detected sample rate. The sample rate specified in MediaSampleRateHertz doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
Invalid file size: file size too large. The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
Invalid number of channels: number of channels too large. Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
If TranscriptionJobStatus is FAILED, FailureReason contains information about why the transcription job request failed. The FailureReason field contains one of the following values:
Unsupported media format. The media format specified in MediaFormat isn't valid. Refer to MediaFormat for a list of supported formats.
The media format provided does not match the detected media format. The media format specified in MediaFormat doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
Invalid sample rate for audio file. The sample rate specified in MediaSampleRateHertz isn't valid. The sample rate must be between 16,000 and 48,000 hertz.
The sample rate provided does not match the detected sample rate. The sample rate specified in MediaSampleRateHertz doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
Invalid file size: file size too large. The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
Invalid number of channels: number of channels too large. Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
Specify additional optional settings in your request, including channel identification, alternative transcriptions, and speaker labeling; allows you to apply custom vocabularies to your medical transcription job.
" + "documentation": "Provides information on any additional settings that were included in your request. Additional settings include channel identification, alternative transcriptions, speaker partitioning, custom vocabularies, and custom vocabulary filters.
" }, "ContentIdentificationType": { "shape": "MedicalContentIdentificationType", - "documentation": "Labels all personal health information (PHI) identified in your transcript. For more information, see Identifying personal health information (PHI) in a transcription.
" + "documentation": "Indicates whether content identification was enabled for your transcription request.
" }, "Specialty": { "shape": "Specialty", @@ -2641,11 +2657,11 @@ "members": { "ShowSpeakerLabels": { "shape": "Boolean", - "documentation": "Enables speaker identification (diarization) in your transcription output. Speaker identification labels the speech from individual speakers in your media file.
If you enable ShowSpeakerLabels
in your request, you must also include MaxSpeakerLabels
.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a BadRequestException
.
For more information, see Identifying speakers (diarization).
" + "documentation": "Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.
If you enable ShowSpeakerLabels
in your request, you must also include MaxSpeakerLabels
.
You can't include ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a BadRequestException
.
For more information, see Partitioning speakers (diarization).
" }, "MaxSpeakerLabels": { "shape": "MaxSpeakers", - "documentation": "Specify the maximum number of speakers you want to identify in your media.
Note that if your media contains more speakers than the specified number, multiple speakers will be identified as a single speaker.
If you specify the MaxSpeakerLabels
field, you must set the ShowSpeakerLabels
field to true.
Specify the maximum number of speakers you want to partition in your media.
Note that if your media contains more speakers than the specified number, multiple speakers are treated as a single speaker.
If you specify the MaxSpeakerLabels
field, you must set the ShowSpeakerLabels
field to true.
The name of the custom vocabulary you want to use when processing your medical transcription job. Vocabulary names are case sensitive.
The language of the specified vocabulary must match the language code you specify in your transcription request. If the languages don't match, the vocabulary isn't applied. There are no errors or warnings associated with a language mismatch. US English (en-US
) is the only valid language for Amazon Transcribe Medical.
The name of the custom vocabulary you want to use when processing your medical transcription job. Custom vocabulary names are case sensitive.
The language of the specified custom vocabulary must match the language code that you specify in your transcription request. If the languages don't match, the custom vocabulary isn't applied. There are no errors or warnings associated with a language mismatch. US English (en-US
) is the only valid language for Amazon Transcribe Medical.
Allows additional optional settings in your request, including channel identification, alternative transcriptions, and speaker labeling; allows you to apply custom vocabularies to your medical transcription job.
" + "documentation": "Allows additional optional settings in your request, including channel identification, alternative transcriptions, and speaker partitioning. You can use that to apply custom vocabularies to your medical transcription job.
" }, "ModelName": { "type": "string", @@ -2677,7 +2693,7 @@ "members": { "LanguageModelName": { "shape": "ModelName", - "documentation": "The name of the custom language model you want to use when processing your transcription job. Note that language model names are case sensitive.
The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the language model isn't applied. There are no errors or warnings associated with a language mismatch.
" + "documentation": "The name of the custom language model you want to use when processing your transcription job. Note that custom language model names are case sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
" } }, "documentation": "Provides the name of the custom language model that was included in the specified transcription job.
Only use ModelSettings
with the LanguageModelName
sub-parameter if you're not using automatic language identification (). If using
LanguageIdSettings
in your request, this parameter contains a LanguageModelName
sub-parameter.
Specify the duration, in milliseconds, of the period of silence you want to flag. For example, you can flag a silent period that lasts 30000 milliseconds.
" + "documentation": "Specify the duration, in milliseconds, of the period of silence that you want to flag. For example, you can flag a silent period that lasts 30,000 milliseconds.
" }, "AbsoluteTimeRange": { "shape": "AbsoluteTimeRange", - "documentation": "Allows you to specify a time range (in milliseconds) in your audio, during which you want to search for a period of silence. See for more detail.
" + "documentation": "Makes it possible to specify a time range (in milliseconds) in your audio, during which you want to search for a period of silence. See for more detail.
" }, "RelativeTimeRange": { "shape": "RelativeTimeRange", - "documentation": "Allows you to specify a time range (in percentage) in your media file, during which you want to search for a period of silence. See for more detail.
" + "documentation": "Makes it possible to specify a time range (in percentage) in your media file, during which you want to search for a period of silence. See for more detail.
" }, "Negate": { "shape": "Boolean", "documentation": "Set to TRUE
to flag periods of speech. Set to FALSE
to flag periods of silence
Flag the presence or absence of periods of silence in your Call Analytics transcription output.
Rules using NonTalkTimeFilter
are designed to match:
The presence of silence at specified periods throughout the call
The presence of speech at specified periods throughout the call
See Rule criteria for usage examples.
" + "documentation": "Flag the presence or absence of periods of silence in your Call Analytics transcription output.
Rules using NonTalkTimeFilter
are designed to match:
The presence of silence at specified periods throughout the call
The presence of speech at specified periods throughout the call
See Rule criteria for batch categories for usage examples.
" }, "OutputBucketName": { "type": "string", @@ -2822,14 +2838,14 @@ }, "First": { "shape": "Percentage", - "documentation": "The time, in percentage, from the start of your media file until the value you specify in which Amazon Transcribe searches for your specified criteria.
" + "documentation": "The time, in percentage, from the start of your media file until the specified value. Amazon Transcribe searches for your specified criteria in this time segment.
" }, "Last": { "shape": "Percentage", - "documentation": "The time, in percentage, from the value you specify until the end of your media file in which Amazon Transcribe searches for your specified criteria.
" + "documentation": "The time, in percentage, from the specified value until the end of your media file. Amazon Transcribe searches for your specified criteria in this time segment.
" } }, - "documentation": "A time range, in percentage, between two points in your media file.
You can use StartPercentage
and EndPercentage
to search a custom segment. For example, setting StartPercentage
to 10 and EndPercentage
to 50 only searches for your specified criteria in the audio contained between the 10 percent mark and the 50 percent mark of your media file.
You can use also First
to search from the start of the media file until the time you specify, or Last
to search from the time you specify until the end of the media file. For example, setting First
to 10 only searches for your specified criteria in the audio contained in the first 10 percent of the media file.
If you prefer to use milliseconds instead of percentage, see .
" + "documentation": "A time range, in percentage, between two points in your media file.
You can use StartPercentage
and EndPercentage
to search a custom segment. For example, setting StartPercentage
to 10 and EndPercentage
to 50 only searches for your specified criteria in the audio contained between the 10 percent mark and the 50 percent mark of your media file.
You can use also First
to search from the start of the media file until the time that you specify. Or use Last
to search from the time that you specify until the end of the media file. For example, setting First
to 10 only searches for your specified criteria in the audio contained in the first 10 percent of the media file.
If you prefer to use milliseconds instead of percentage, see .
" }, "Rule": { "type": "structure", @@ -2851,7 +2867,7 @@ "documentation": "Flag the presence or absence of specific sentiments in your Call Analytics transcription output. Refer to for more detail.
" } }, - "documentation": "A rule is a set of criteria you can specify to flag an attribute in your Call Analytics output. Rules define a Call Analytics category.
Rules can include these parameters: , , , and . To learn more about these parameters, refer to Rule criteria.
To learn more about Call Analytics categories, see Creating categories.
To learn more about Call Analytics, see Analyzing call center audio with Call Analytics.
", + "documentation": "A rule is a set of criteria that you can specify to flag an attribute in your Call Analytics output. Rules define a Call Analytics category.
Rules can include these parameters: , , , and .
To learn more about Call Analytics rules and categories, see Creating categories for batch transcriptions and Creating categories for streaming transcriptions.
To learn more about Call Analytics, see Analyzing call center audio with Call Analytics.
", "union": true }, "RuleList": { @@ -2870,26 +2886,26 @@ "members": { "Sentiments": { "shape": "SentimentValueList", - "documentation": "Specify the sentiments you want to flag.
" + "documentation": "Specify the sentiments that you want to flag.
" }, "AbsoluteTimeRange": { "shape": "AbsoluteTimeRange", - "documentation": "Allows you to specify a time range (in milliseconds) in your audio, during which you want to search for the specified sentiments. See for more detail.
" + "documentation": "Makes it possible to specify a time range (in milliseconds) in your audio, during which you want to search for the specified sentiments. See for more detail.
" }, "RelativeTimeRange": { "shape": "RelativeTimeRange", - "documentation": "Allows you to specify a time range (in percentage) in your media file, during which you want to search for the specified sentiments. See for more detail.
" + "documentation": "Makes it possible to specify a time range (in percentage) in your media file, during which you want to search for the specified sentiments. See for more detail.
" }, "ParticipantRole": { "shape": "ParticipantRole", - "documentation": "Specify the participant you want to flag. Omitting this parameter is equivalent to specifying both participants.
" + "documentation": "Specify the participant that you want to flag. Omitting this parameter is equivalent to specifying both participants.
" }, "Negate": { "shape": "Boolean", - "documentation": "Set to TRUE
to flag the sentiments you didn't include in your request. Set to FALSE
to flag the sentiments you specified in your request.
Set to TRUE
to flag the sentiments that you didn't include in your request. Set to FALSE
to flag the sentiments that you specified in your request.
Flag the presence or absence of specific sentiments detected in your Call Analytics transcription output.
Rules using SentimentFilter
are designed to match:
The presence or absence of a positive sentiment felt by the customer, agent, or both at specified points in the call
The presence or absence of a negative sentiment felt by the customer, agent, or both at specified points in the call
The presence or absence of a neutral sentiment felt by the customer, agent, or both at specified points in the call
The presence or absence of a mixed sentiment felt by the customer, the agent, or both at specified points in the call
See Rule criteria for examples.
" + "documentation": "Flag the presence or absence of specific sentiments detected in your Call Analytics transcription output.
Rules using SentimentFilter
are designed to match:
The presence or absence of a positive sentiment felt by the customer, agent, or both at specified points in the call
The presence or absence of a negative sentiment felt by the customer, agent, or both at specified points in the call
The presence or absence of a neutral sentiment felt by the customer, agent, or both at specified points in the call
The presence or absence of a mixed sentiment felt by the customer, the agent, or both at specified points in the call
See Rule criteria for batch categories for usage examples.
" }, "SentimentValue": { "type": "string", @@ -2905,6 +2921,7 @@ "member": { "shape": "SentimentValue" }, + "max": 1, "min": 1 }, "Settings": { @@ -2916,11 +2933,11 @@ }, "ShowSpeakerLabels": { "shape": "Boolean", - "documentation": "Enables speaker identification (diarization) in your transcription output. Speaker identification labels the speech from individual speakers in your media file.
If you enable ShowSpeakerLabels
in your request, you must also include MaxSpeakerLabels
.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a BadRequestException
.
For more information, see Identifying speakers (diarization).
" + "documentation": "Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.
If you enable ShowSpeakerLabels
in your request, you must also include MaxSpeakerLabels
.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a BadRequestException
.
For more information, see Partitioning speakers (diarization).
" }, "MaxSpeakerLabels": { "shape": "MaxSpeakers", - "documentation": "Specify the maximum number of speakers you want to identify in your media.
Note that if your media contains more speakers than the specified number, multiple speakers will be identified as a single speaker.
If you specify the MaxSpeakerLabels
field, you must set the ShowSpeakerLabels
field to true.
Specify the maximum number of speakers you want to partition in your media.
Note that if your media contains more speakers than the specified number, multiple speakers are treated as a single speaker.
If you specify the MaxSpeakerLabels
field, you must set the ShowSpeakerLabels
field to true.
Specify how you want your vocabulary filter applied to your transcript.
To replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
Specify how you want your custom vocabulary filter applied to your transcript.
To replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
Allows additional optional settings in your request, including channel identification, alternative transcriptions, and speaker labeling; allows you to apply custom vocabularies to your transcription job.
" + "documentation": "Allows additional optional settings in your request, including channel identification, alternative transcriptions, and speaker partitioning. You can use that to apply custom vocabularies to your transcription job.
" }, "Specialty": { "type": "string", @@ -2963,7 +2980,8 @@ "documentation": "A unique name, chosen by you, for your Call Analytics job.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new job with the same name as an existing job, you get a ConflictException
error.
Describes the Amazon S3 location of the media file you want to use in your Call Analytics request.
" }, "OutputLocation": { "shape": "Uri", @@ -2975,7 +2993,7 @@ }, "DataAccessRoleArn": { "shape": "DataAccessRoleArn", - "documentation": "The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path
. For example: arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM ARNs.
" + "documentation": "The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role that you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path
. For example: arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM ARNs.
" }, "Settings": { "shape": "CallAnalyticsJobSettings", @@ -2983,7 +3001,7 @@ }, "ChannelDefinitions": { "shape": "ChannelDefinitions", - "documentation": "Allows you to specify which speaker is on which channel. For example, if your agent is the first participant to speak, you would set ChannelId
to 0
(to indicate the first channel) and ParticipantRole
to AGENT
(to indicate that it's the agent speaking).
Makes it possible to specify which speaker is on which channel. For example, if your agent is the first participant to speak, you would set ChannelId
to 0
(to indicate the first channel) and ParticipantRole
to AGENT
(to indicate that it's the agent speaking).
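A minimal AWS SDK for JavaScript (v2) sketch of the channel-to-participant mapping described here; the job name, role ARN, and S3 URI are placeholders:

```javascript
// Minimal sketch: mapping channels to participants in a Call Analytics job.
// Job name, IAM role ARN, and S3 URI below are placeholders.
const AWS = require('aws-sdk');
const transcribeservice = new AWS.TranscribeService({ region: 'us-west-2' });

transcribeservice.startCallAnalyticsJob({
  CallAnalyticsJobName: 'my-call-analytics-job',
  DataAccessRoleArn: 'arn:aws:iam::111122223333:role/Admin',
  Media: { MediaFileUri: 's3://DOC-EXAMPLE-BUCKET/my-media-file.flac' },
  ChannelDefinitions: [
    { ChannelId: 0, ParticipantRole: 'AGENT' },    // first channel: the agent
    { ChannelId: 1, ParticipantRole: 'CUSTOMER' }  // second channel: the customer
  ]
}).promise().then(console.log).catch(console.error);
```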
A unique name, chosen by you, for your medical transcription job. The name you specify is also used as the default name of your transcription output file. If you want to specify a different name for your transcription output, use the OutputKey
parameter.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new job with the same name as an existing job, you get a ConflictException
error.
A unique name, chosen by you, for your medical transcription job. The name that you specify is also used as the default name of your transcription output file. If you want to specify a different name for your transcription output, use the OutputKey
parameter.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new job with the same name as an existing job, you get a ConflictException
error.
The sample rate, in Hertz, of the audio track in your input media file.
If you don't specify the media sample rate, Amazon Transcribe Medical determines it for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe Medical; if there's a mismatch between the value you specify and the value detected, your job fails. Therefore, in most cases, it's advised to omit MediaSampleRateHertz
and let Amazon Transcribe Medical determine the sample rate.
The sample rate, in hertz, of the audio track in your input media file.
If you don't specify the media sample rate, Amazon Transcribe Medical determines it for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe Medical; if there's a mismatch between the value that you specify and the value detected, your job fails. Therefore, in most cases, it's advised to omit MediaSampleRateHertz
and let Amazon Transcribe Medical determine the sample rate.
The name of the Amazon S3 bucket where you want your medical transcription output stored. Do not include the S3://
prefix of the specified bucket.
If you want your output to go to a sub-folder of this bucket, specify it using the OutputKey
parameter; OutputBucketName
only accepts the name of a bucket.
For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
, set OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you want your output stored in S3://DOC-EXAMPLE-BUCKET/test-files/
, set OutputBucketName
to DOC-EXAMPLE-BUCKET
and OutputKey
to test-files/
.
Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3 permissions using the Amazon Web Services Management Console. See also Permissions Required for IAM User Roles.
If you don't specify OutputBucketName
, your transcript is placed in a service-managed Amazon S3 bucket and you are provided with a URI to access your transcript.
The name of the Amazon S3 bucket where you want your medical transcription output stored. Do not include the S3://
prefix of the specified bucket.
If you want your output to go to a sub-folder of this bucket, specify it using the OutputKey
parameter; OutputBucketName
only accepts the name of a bucket.
For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
, set OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you want your output stored in S3://DOC-EXAMPLE-BUCKET/test-files/
, set OutputBucketName
to DOC-EXAMPLE-BUCKET
and OutputKey
to test-files/
.
Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3 permissions using the Amazon Web Services Management Console. See also Permissions Required for IAM User Roles.
" }, "OutputKey": { "shape": "OutputKey", @@ -3044,7 +3062,7 @@ }, "Settings": { "shape": "MedicalTranscriptionSetting", - "documentation": "Specify additional optional settings in your request, including channel identification, alternative transcriptions, and speaker labeling; allows you to apply custom vocabularies to your transcription job.
" + "documentation": "Specify additional optional settings in your request, including channel identification, alternative transcriptions, and speaker partitioning. You can use that to apply custom vocabularies to your transcription job.
" }, "ContentIdentificationType": { "shape": "MedicalContentIdentificationType", @@ -3082,7 +3100,7 @@ "members": { "TranscriptionJobName": { "shape": "TranscriptionJobName", - "documentation": "A unique name, chosen by you, for your transcription job. The name you specify is also used as the default name of your transcription output file. If you want to specify a different name for your transcription output, use the OutputKey
parameter.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new job with the same name as an existing job, you get a ConflictException
error.
A unique name, chosen by you, for your transcription job. The name that you specify is also used as the default name of your transcription output file. If you want to specify a different name for your transcription output, use the OutputKey
parameter.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new job with the same name as an existing job, you get a ConflictException
error.
The sample rate, in Hertz, of the audio track in your input media file.
If you don't specify the media sample rate, Amazon Transcribe determines it for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe; if there's a mismatch between the value you specify and the value detected, your job fails. Therefore, in most cases, it's advised to omit MediaSampleRateHertz
and let Amazon Transcribe determine the sample rate.
The sample rate, in hertz, of the audio track in your input media file.
If you don't specify the media sample rate, Amazon Transcribe determines it for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value that you specify and the value detected, your job fails. In most cases, you can omit MediaSampleRateHertz
and let Amazon Transcribe determine the sample rate.
Specify additional optional settings in your request, including channel identification, alternative transcriptions, speaker labeling; allows you to apply custom vocabularies and vocabulary filters.
If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use Settings
with the VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
If you're using automatic language identification with your request and want to include a custom language model, a custom vocabulary, or a custom vocabulary filter, use instead the parameter with the
LanguageModelName
, VocabularyName
or VocabularyFilterName
sub-parameters.
Specify additional optional settings in your request, including channel identification, alternative transcriptions, speaker partitioning. You can use that to apply custom vocabularies and vocabulary filters.
If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use Settings
with the VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
If you're using automatic language identification with your request and want to include a custom language model, a custom vocabulary, or a custom vocabulary filter, use instead the parameter with the
LanguageModelName
, VocabularyName
or VocabularyFilterName
sub-parameters.
Allows you to control how your transcription job is processed. Currently, the only JobExecutionSettings
modification you can choose is enabling job queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also include the sub-parameters: AllowDeferredExecution
and DataAccessRoleArn
.
Makes it possible to control how your transcription job is processed. Currently, the only JobExecutionSettings
modification you can choose is enabling job queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also include the sub-parameters: AllowDeferredExecution
and DataAccessRoleArn
.
Allows you to redact or flag specified personally identifiable information (PII) in your transcript. If you use ContentRedaction
, you must also include the sub-parameters: PiiEntityTypes
, RedactionOutput
, and RedactionType
.
Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If you use ContentRedaction
, you must also include the sub-parameters: PiiEntityTypes
, RedactionOutput
, and RedactionType
.
Enables automatic language identification in your transcription job request.
If you include IdentifyLanguage
, you can optionally include a list of language codes, using LanguageOptions
, that you think may be present in your media file. Including language options can improve transcription accuracy.
If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your automatic language identification request, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
).
Note that you must include one of LanguageCode
, IdentifyLanguage
, or IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your transcription job fails.
Enables automatic language identification in your transcription job request. Use this parameter if your media file contains only one language. If your media contains multiple languages, use IdentifyMultipleLanguages
instead.
If you include IdentifyLanguage
, you can optionally include a list of language codes, using LanguageOptions
, that you think may be present in your media file. Including LanguageOptions
restricts IdentifyLanguage
to only the language options that you specify, which can improve transcription accuracy.
If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your automatic language identification request, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
). If you include LanguageIdSettings
, also include LanguageOptions
.
Note that you must include one of LanguageCode
, IdentifyLanguage
, or IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your transcription job fails.
Enables automatic multi-language identification in your transcription job request. Use this parameter if your media file contains more than one language.
If you include IdentifyMultipleLanguages
, you can optionally include a list of language codes, using LanguageOptions
, that you think may be present in your media file. Including language options can improve transcription accuracy.
If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification request, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
and VocabularyFilterName
).
Note that you must include one of LanguageCode
, IdentifyLanguage
, or IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your transcription job fails.
Enables automatic multi-language identification in your transcription job request. Use this parameter if your media file contains more than one language. If your media contains only one language, use IdentifyLanguage
instead.
If you include IdentifyMultipleLanguages
, you can optionally include a list of language codes, using LanguageOptions
, that you think may be present in your media file. Including LanguageOptions
restricts IdentifyLanguage
to only the language options that you specify, which can improve transcription accuracy.
If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification request, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
and VocabularyFilterName
). If you include LanguageIdSettings
, also include LanguageOptions
.
Note that you must include one of LanguageCode
, IdentifyLanguage
, or IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your transcription job fails.
You can specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.
If you include LanguageOptions
in your request, you must also include IdentifyLanguage
.
For more information, refer to Supported languages.
To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample rate of 16,000 Hz or higher.
You can specify two or more language codes that represent the languages you think may be present in your media. Including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.
If you include LanguageOptions
in your request, you must also include IdentifyLanguage
.
For more information, refer to Supported languages.
To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample rate of 16,000 Hz or higher.
If using automatic language identification (IdentifyLanguage
) in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
).
You can specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The languages you specify must match the languages of the specified custom language models, custom vocabularies, and custom vocabulary filters.
To include language options using IdentifyLanguage
without including a custom language model, a custom vocabulary, or a custom vocabulary filter, use LanguageOptions
instead of LanguageIdSettings
. Including language options can improve the accuracy of automatic language identification.
If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the parameter with the
LanguageModelName
sub-parameter.
If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the parameter with the
VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
If using automatic language identification in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
). Note that multi-language identification (IdentifyMultipleLanguages
) doesn't support custom language models.
LanguageIdSettings
supports two to five language codes. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you specify must match the languages of the associated custom language models, custom vocabularies, and custom vocabulary filters.
It's recommended that you include LanguageOptions
when using LanguageIdSettings
to ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is in en-US
but Amazon Transcribe determines that the language spoken in your media is en-AU
, your custom vocabulary is not applied to your transcription. If you include LanguageOptions
and include en-US
as the only English language dialect, your custom vocabulary is applied to your transcription.
If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the parameter with the
LanguageModelName
sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the parameter with the
VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
The Amazon Resource Name (ARN) of the resource you want to tag. ARNs have the format arn:partition:service:region:account-id:resource-type/resource-id
.
For example, arn:aws:transcribe:us-west-2:account-id:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
, medical-transcription-job
, vocabulary
, medical-vocabulary
, vocabulary-filter
, and language-model
.
The Amazon Resource Name (ARN) of the resource you want to tag. ARNs have the format arn:partition:service:region:account-id:resource-type/resource-id
.
For example, arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
, medical-transcription-job
, vocabulary
, medical-vocabulary
, vocabulary-filter
, and language-model
.
Flag the presence or absence of an exact match to the phrases you specify. For example, if you specify the phrase \"speak to a manager\" as your Targets
value, only that exact phrase is flagged.
Note that semantic matching is not supported. For example, if your customer says \"speak to the manager\", instead of \"speak to a manager\", your content is not flagged.
" + "documentation": "Flag the presence or absence of an exact match to the phrases that you specify. For example, if you specify the phrase \"speak to a manager\" as your Targets
value, only that exact phrase is flagged.
Note that semantic matching is not supported. For example, if your customer says \"speak to the manager\", instead of \"speak to a manager\", your content is not flagged.
" }, "AbsoluteTimeRange": { "shape": "AbsoluteTimeRange", - "documentation": "Allows you to specify a time range (in milliseconds) in your audio, during which you want to search for the specified key words or phrases. See for more detail.
" + "documentation": "Makes it possible to specify a time range (in milliseconds) in your audio, during which you want to search for the specified key words or phrases. See for more detail.
" }, "RelativeTimeRange": { "shape": "RelativeTimeRange", - "documentation": "Allows you to specify a time range (in percentage) in your media file, during which you want to search for the specified key words or phrases. See for more detail.
" + "documentation": "Makes it possible to specify a time range (in percentage) in your media file, during which you want to search for the specified key words or phrases. See for more detail.
" }, "ParticipantRole": { "shape": "ParticipantRole", - "documentation": "Specify the participant you want to flag. Omitting this parameter is equivalent to specifying both participants.
" + "documentation": "Specify the participant that you want to flag. Omitting this parameter is equivalent to specifying both participants.
" }, "Negate": { "shape": "Boolean", - "documentation": "Set to TRUE
to flag the absence of the phrase you specified in your request. Set to FALSE
to flag the presence of the phrase you specified in your request.
Set to TRUE
to flag the absence of the phrase that you specified in your request. Set to FALSE
to flag the presence of the phrase that you specified in your request.
Specify the phrases you want to flag.
" + "documentation": "Specify the phrases that you want to flag.
" } }, - "documentation": "Flag the presence or absence of specific words or phrases detected in your Call Analytics transcription output.
Rules using TranscriptFilter
are designed to match:
Custom words or phrases spoken by the agent, the customer, or both
Custom words or phrases not spoken by the agent, the customer, or either
Custom words or phrases that occur at a specific time frame
See Rule criteria for examples.
" + "documentation": "Flag the presence or absence of specific words or phrases detected in your Call Analytics transcription output.
Rules using TranscriptFilter
are designed to match:
Custom words or phrases spoken by the agent, the customer, or both
Custom words or phrases not spoken by the agent, the customer, or either
Custom words or phrases that occur at a specific time frame
See Rule criteria for batch categories and Rule criteria for streaming categories for usage examples.
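To illustrate the rule structure described above, here is a hedged AWS SDK for JavaScript (v2) sketch of a Call Analytics category that flags an exact phrase spoken by the customer; the category name and phrase are placeholders:

```javascript
// Minimal sketch: a category rule using TranscriptFilter with an exact-match phrase.
// Category name and target phrase below are placeholders.
const AWS = require('aws-sdk');
const transcribeservice = new AWS.TranscribeService({ region: 'us-west-2' });

transcribeservice.createCallAnalyticsCategory({
  CategoryName: 'escalation-requests',
  Rules: [
    {
      TranscriptFilter: {
        TranscriptFilterType: 'EXACT',
        Targets: ['speak to a manager'],
        ParticipantRole: 'CUSTOMER',
        Negate: false                    // flag the presence, not the absence, of the phrase
      }
    }
  ]
}).promise().then(console.log).catch(console.error);
```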
" }, "TranscriptFilterType": { "type": "string", @@ -3373,11 +3391,11 @@ }, "LanguageCode": { "shape": "LanguageCode", - "documentation": "The language code used to create your transcription job. For a list of supported languages and their associated language codes, refer to the Supported languages table.
Note that you must include one of LanguageCode
, IdentifyLanguage
, or IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your transcription job fails.
The language code used to create your transcription job. This parameter is used with single-language identification. For multi-language identification requests, refer to the plural version of this parameter, LanguageCodes
.
The sample rate, in Hertz, of the audio track in your input media file.
" + "documentation": "The sample rate, in hertz, of the audio track in your input media file.
" }, "MediaFormat": { "shape": "MediaFormat", @@ -3385,7 +3403,7 @@ }, "Media": { "shape": "Media", - "documentation": "Describes the Amazon S3 location of the media file you want to use in your request.
" + "documentation": "Provides the Amazon S3 location of the media file you used in your request.
" }, "Transcript": { "shape": "Transcript", @@ -3405,23 +3423,23 @@ }, "FailureReason": { "shape": "FailureReason", - "documentation": "If TranscriptionJobStatus
is FAILED
, FailureReason
contains information about why the transcription job request failed.
The FailureReason
field contains one of the following values:
Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to MediaFormat for a list of supported formats.
The media format provided does not match the detected media format
.
The media format specified in MediaFormat
doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid. The sample rate must be between 8,000 and 48,000 Hertz.
The sample rate provided does not match the detected sample rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
If TranscriptionJobStatus
is FAILED
, FailureReason
contains information about why the transcription job request failed.
The FailureReason
field contains one of the following values:
Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to MediaFormat for a list of supported formats.
The media format provided does not match the detected media format
.
The media format specified in MediaFormat
doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid. The sample rate must be between 8,000 and 48,000 hertz.
The sample rate provided does not match the detected sample rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
Specify additional optional settings in your request, including channel identification, alternative transcriptions, speaker labeling; allows you to apply custom vocabularies and vocabulary filters.
If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use Settings
with the VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
If you're using automatic language identification with your request and want to include a custom language model, a custom vocabulary, or a custom vocabulary filter, do not use the Settings
parameter; use instead the parameter with the
LanguageModelName
, VocabularyName
or VocabularyFilterName
sub-parameters.
Provides information on any additional settings that were included in your request. Additional settings include channel identification, alternative transcriptions, speaker partitioning, custom vocabularies, and custom vocabulary filters.
" }, "ModelSettings": { "shape": "ModelSettings", - "documentation": "The custom language model you want to include with your transcription job. If you include ModelSettings
in your request, you must include the LanguageModelName
sub-parameter.
Provides information on the custom language model you included in your request.
" }, "JobExecutionSettings": { "shape": "JobExecutionSettings", - "documentation": "Provides information about how your transcription job is being processed. This parameter shows if your request is queued and what data access role is being used.
" + "documentation": "Provides information about how your transcription job was processed. This parameter shows if your request was queued and what data access role was used.
" }, "ContentRedaction": { "shape": "ContentRedaction", - "documentation": "Redacts or flags specified personally identifiable information (PII) in your transcript.
" + "documentation": "Indicates whether redaction was enabled in your transcript.
" }, "IdentifyLanguage": { "shape": "Boolean", @@ -3433,7 +3451,7 @@ }, "LanguageOptions": { "shape": "LanguageOptions", - "documentation": "You can specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.
If you include LanguageOptions
in your request, you must also include IdentifyLanguage
.
For more information, refer to Supported languages.
To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample rate of 16,000 Hz or higher.
Provides the language codes you specified in your request.
" }, "IdentifiedLanguageScore": { "shape": "IdentifiedLanguageScore", @@ -3441,19 +3459,19 @@ }, "LanguageCodes": { "shape": "LanguageCodeList", - "documentation": "The language codes used to create your transcription job. This parameter is used with multi-language identification. For single-language identification requests, refer to the singular version of this parameter, LanguageCode
.
For a list of supported languages and their associated language codes, refer to the Supported languages table.
" + "documentation": "The language codes used to create your transcription job. This parameter is used with multi-language identification. For single-language identification requests, refer to the singular version of this parameter, LanguageCode
.
Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you start this new job.
To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
" + "documentation": "The tags, each in the form of a key:value pair, assigned to the specified transcription job.
" }, "Subtitles": { "shape": "SubtitlesOutput", - "documentation": "Generate subtitles for your media file with your transcription request.
" + "documentation": "Indicates whether subtitles were generated with your transcription.
" }, "LanguageIdSettings": { "shape": "LanguageIdSettingsMap", - "documentation": "If using automatic language identification (IdentifyLanguage
) in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
, LanguageModelName
, and VocabularyFilterName
).
You can specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The languages you specify must match the languages of the specified custom language models, custom vocabularies, and custom vocabulary filters.
To include language options using IdentifyLanguage
without including a custom language model, a custom vocabulary, or a custom vocabulary filter, use LanguageOptions
instead of LanguageIdSettings
. Including language options can improve the accuracy of automatic language identification.
If you want to include a custom language model with your request but do not want to use automatic language identification, use instead the parameter with the
LanguageModelName
sub-parameter.
If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use instead the parameter with the
VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
Provides the name and language of all custom language models, custom vocabularies, and custom vocabulary filters that you included in your request.
" } }, "documentation": "Provides detailed information about a transcription job.
To view the status of the specified transcription job, check the TranscriptionJobStatus
field. If the status is COMPLETED
, the job is finished and you can find the results at the location specified in TranscriptFileUri
. If the status is FAILED
, FailureReason
provides details on why your transcription job failed.
If you enabled content redaction, the redacted transcript can be found at the location specified in RedactedTranscriptFileUri
.
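As a hedged illustration of the TranscriptionJob fields documented above (the job, bucket, and vocabulary names below are placeholders, not values from this diff):

```typescript
import { TranscribeService } from 'aws-sdk';

const transcribe = new TranscribeService({ region: 'us-west-2' });

async function run(): Promise<void> {
  // Start a job that identifies the language and applies a custom vocabulary
  // per candidate language via LanguageIdSettings.
  await transcribe.startTranscriptionJob({
    TranscriptionJobName: 'example-job',
    Media: { MediaFileUri: 's3://amzn-s3-demo-bucket/call.wav' },
    IdentifyLanguage: true,
    LanguageOptions: ['en-US', 'es-US'],
    LanguageIdSettings: {
      'en-US': { VocabularyName: 'en-vocabulary' },
      'es-US': { VocabularyName: 'es-vocabulary' },
    },
    Subtitles: { Formats: ['vtt'] },
    Tags: [{ Key: 'department', Value: 'support' }],
  }).promise();

  // COMPLETED jobs expose Transcript.TranscriptFileUri; FAILED jobs expose
  // FailureReason, as the documentation above describes.
  const { TranscriptionJob } = await transcribe
    .getTranscriptionJob({ TranscriptionJobName: 'example-job' })
    .promise();
  console.log(TranscriptionJob?.TranscriptionJobStatus, TranscriptionJob?.FailureReason);
}

run().catch(console.error);
```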
-        "documentation": "The Amazon Resource Name (ARN) of the Amazon Transcribe resource you want to remove tags from. ARNs have the format arn:partition:service:region:account-id:resource-type/resource-id. For example, arn:aws:transcribe:us-west-2:account-id:transcription-job/transcription-job-name. Valid values for resource-type are: transcription-job, medical-transcription-job, vocabulary, medical-vocabulary, vocabulary-filter, and language-model."
+        "documentation": "The Amazon Resource Name (ARN) of the Amazon Transcribe resource you want to remove tags from. ARNs have the format arn:partition:service:region:account-id:resource-type/resource-id. For example, arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name. Valid values for resource-type are: transcription-job, medical-transcription-job, vocabulary, medical-vocabulary, vocabulary-filter, and language-model."
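A short hedged sketch of the UntagResource call that uses the ARN format documented above; the account ID, job name, and tag key are placeholders:

```typescript
import { TranscribeService } from 'aws-sdk';

const transcribe = new TranscribeService({ region: 'us-west-2' });

// Remove a tag from a transcription job, identified by its ARN.
transcribe.untagResource({
  ResourceArn: 'arn:aws:transcribe:us-west-2:111122223333:transcription-job/example-job',
  TagKeys: ['department'],
}, (err) => {
  if (err) console.error(err);
});
```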
        "documentation": "The rules used for the updated Call Analytics category. The rules you provide in this field replace the ones that are currently being used in the specified category."
+      },
+      "InputType": {
+        "shape": "InputType",
+        "documentation": "Choose whether you want to update a streaming or a batch Call Analytics category. The input type you specify must match the input type specified when the category was created. For example, if you created a category with the POST_CALL input type, you must use POST_CALL as the input type when updating this category."
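A hedged sketch of an UpdateCallAnalyticsCategory request that passes the new InputType member added here; the category name and rule are placeholders:

```typescript
import { TranscribeService } from 'aws-sdk';

const transcribe = new TranscribeService({ region: 'us-west-2' });

// Update a post-call (batch) category; InputType must match the value the
// category was created with, as the documentation above explains.
transcribe.updateCallAnalyticsCategory({
  CategoryName: 'negative-customer-sentiment',
  InputType: 'POST_CALL',                 // or 'REAL_TIME' for streaming categories
  Rules: [
    {
      SentimentFilter: {
        Sentiments: ['NEGATIVE'],
        ParticipantRole: 'CUSTOMER',
      },
    },
  ],
}).promise().catch(console.error);
```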
-        "documentation": "The name of the custom medical vocabulary you want to update. Vocabulary names are case sensitive."
+        "documentation": "The name of the custom medical vocabulary you want to update. Custom medical vocabulary names are case sensitive."
      },
      "LanguageCode": {
        "shape": "LanguageCode",
@@ -3625,7 +3647,7 @@
      },
      "LanguageCode": {
        "shape": "LanguageCode",
-        "documentation": "The language code you selected for your medical vocabulary. US English (en-US) is the only language supported with Amazon Transcribe Medical."
+        "documentation": "The language code you selected for your custom medical vocabulary. US English (en-US) is the only language supported with Amazon Transcribe Medical."
      },
-        "documentation": "The processing state of your custom medical vocabulary. If the state is READY, you can use the vocabulary in a StartMedicalTranscriptionJob request."
+        "documentation": "The processing state of your custom medical vocabulary. If the state is READY, you can use the custom vocabulary in a StartMedicalTranscriptionJob request."
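A hedged sketch of the UpdateMedicalVocabulary request these fields belong to; the vocabulary name and S3 URI are placeholders:

```typescript
import { TranscribeService } from 'aws-sdk';

const transcribe = new TranscribeService({ region: 'us-west-2' });

// Replace the contents of an existing medical vocabulary from a file in S3;
// en-US is the only language Amazon Transcribe Medical supports.
transcribe.updateMedicalVocabulary({
  VocabularyName: 'cardiology-terms',
  LanguageCode: 'en-US',
  VocabularyFileUri: 's3://amzn-s3-demo-bucket/cardiology-terms.txt',
}).promise()
  .then((res) => console.log(res.VocabularyState))  // READY once usable
  .catch(console.error);
```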
-        "documentation": "The name of the custom vocabulary filter you want to update. Vocabulary filter names are case sensitive."
+        "documentation": "The name of the custom vocabulary filter you want to update. Custom vocabulary filter names are case sensitive."
      },
      "Words": {
        "shape": "Words",
-        "documentation": "Use this parameter if you want to update your vocabulary filter by including all desired terms, as comma-separated values, within your request. The other option for updating your vocabulary filter is to save your entries in a text file and upload them to an Amazon S3 bucket, then specify the location of your file using the VocabularyFilterFileUri parameter. Note that if you include Words in your request, you cannot use VocabularyFilterFileUri; you must choose one or the other. Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language."
+        "documentation": "Use this parameter if you want to update your custom vocabulary filter by including all desired terms, as comma-separated values, within your request. The other option for updating your vocabulary filter is to save your entries in a text file and upload them to an Amazon S3 bucket, then specify the location of your file using the VocabularyFilterFileUri parameter. Note that if you include Words in your request, you cannot use VocabularyFilterFileUri; you must choose one or the other. Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your custom vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language."
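A hedged sketch of updating a custom vocabulary filter inline with Words; the filter name and word list are placeholders, and VocabularyFilterFileUri is the mutually exclusive alternative described above:

```typescript
import { TranscribeService } from 'aws-sdk';

const transcribe = new TranscribeService({ region: 'us-west-2' });

// Replace the filter's word list inline instead of pointing at an S3 file.
transcribe.updateVocabularyFilter({
  VocabularyFilterName: 'profanity-filter',
  Words: ['darn', 'heck'],
}).promise().catch(console.error);
```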
" }, "VocabularyFilterFileUri": { "shape": "Uri", @@ -3666,11 +3688,11 @@ }, "LanguageCode": { "shape": "LanguageCode", - "documentation": "The language code you selected for your vocabulary filter.
" + "documentation": "The language code you selected for your custom vocabulary filter.
" }, "LastModifiedTime": { "shape": "DateTime", - "documentation": "The date and time the specified vocabulary filter was last updated.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified custom vocabulary filter was last updated.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
-        "documentation": "The name of the custom vocabulary you want to update. Vocabulary names are case sensitive."
+        "documentation": "The name of the custom vocabulary you want to update. Custom vocabulary names are case sensitive."
      },
      "LanguageCode": {
        "shape": "LanguageCode",
-        "documentation": "The language code that represents the language of the entries in the custom vocabulary you want to update. Each vocabulary must contain terms in only one language. A custom vocabulary can only be used to transcribe files in the same language as the vocabulary. For example, if you create a vocabulary using US English (en-US), you can only apply this vocabulary to files that contain English audio. For a list of supported languages and their associated language codes, refer to the Supported languages table."
+        "documentation": "The language code that represents the language of the entries in the custom vocabulary you want to update. Each custom vocabulary must contain terms in only one language. A custom vocabulary can only be used to transcribe files in the same language as the custom vocabulary. For example, if you create a custom vocabulary using US English (en-US), you can only apply this custom vocabulary to files that contain English audio. For a list of supported languages and their associated language codes, refer to the Supported languages table."
      },
      "Phrases": {
        "shape": "Phrases",
-        "documentation": "Use this parameter if you want to update your vocabulary by including all desired terms, as comma-separated values, within your request. The other option for updating your vocabulary is to save your entries in a text file and upload them to an Amazon S3 bucket, then specify the location of your file using the VocabularyFileUri parameter. Note that if you include Phrases in your request, you cannot use VocabularyFileUri; you must choose one or the other. Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language."
+        "documentation": "Use this parameter if you want to update your custom vocabulary by including all desired terms, as comma-separated values, within your request. The other option for updating your custom vocabulary is to save your entries in a text file and upload them to an Amazon S3 bucket, then specify the location of your file using the VocabularyFileUri parameter. Note that if you include Phrases in your request, you cannot use VocabularyFileUri; you must choose one or the other. Each language has a character set that contains all allowed characters for that specific language. If you use unsupported characters, your custom vocabulary filter request fails. Refer to Character Sets for Custom Vocabularies to get the character set for your language."
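A hedged sketch of updating a custom vocabulary inline with Phrases; the vocabulary name and phrases are placeholders, and VocabularyFileUri is the mutually exclusive alternative:

```typescript
import { TranscribeService } from 'aws-sdk';

const transcribe = new TranscribeService({ region: 'us-west-2' });

// Replace the vocabulary's terms inline; the language must match the one
// the vocabulary was created with.
transcribe.updateVocabulary({
  VocabularyName: 'product-names',
  LanguageCode: 'en-US',
  Phrases: ['Acme-Rocket-Sled', 'Acme-Anvil'],
}).promise()
  .then((res) => console.log(res.VocabularyState))
  .catch(console.error);
```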
" }, "VocabularyFileUri": { "shape": "Uri", @@ -3712,11 +3734,11 @@ }, "LastModifiedTime": { "shape": "DateTime", - "documentation": "The date and time the specified vocabulary was last updated.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified custom vocabulary was last updated.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The processing state of your custom vocabulary. If the state is READY
, you can use the vocabulary in a StartTranscriptionJob
request.
The processing state of your custom vocabulary. If the state is READY
, you can use the custom vocabulary in a StartTranscriptionJob
request.
-        "documentation": "The language code that represents the language of the entries in your vocabulary filter. Each vocabulary filter must contain terms in only one language. A vocabulary filter can only be used to transcribe files in the same language as the filter. For example, if you create a vocabulary filter using US English (en-US), you can only apply this filter to files that contain English audio. For a list of supported languages and their associated language codes, refer to the Supported languages table."
+        "documentation": "The language code that represents the language of the entries in your vocabulary filter. Each custom vocabulary filter must contain terms in only one language. A custom vocabulary filter can only be used to transcribe files in the same language as the filter. For example, if you create a custom vocabulary filter using US English (en-US), you can only apply this filter to files that contain English audio. For a list of supported languages and their associated language codes, refer to the Supported languages table."
      },
      "LastModifiedTime": {
        "shape": "DateTime",
-        "documentation": "The date and time the specified vocabulary filter was last modified. Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022."
+        "documentation": "The date and time the specified custom vocabulary filter was last modified. Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC. For example, 2022-05-04T12:32:58.761000-07:00 represents 12:32 PM UTC-7 on May 4, 2022."
      },
-    "documentation": "Provides information about a vocabulary filter, including the language of the filter, when it was last modified, and its name."
+    "documentation": "Provides information about a custom vocabulary filter, including the language of the filter, when it was last modified, and its name."
" }, "VocabularyFilterMethod": { "type": "string", @@ -3779,18 +3801,18 @@ }, "LanguageCode": { "shape": "LanguageCode", - "documentation": "The language code used to create your custom vocabulary. Each vocabulary must contain terms in only one language.
A custom vocabulary can only be used to transcribe files in the same language as the vocabulary. For example, if you create a vocabulary using US English (en-US
), you can only apply this vocabulary to files that contain English audio.
The language code used to create your custom vocabulary. Each custom vocabulary must contain terms in only one language.
A custom vocabulary can only be used to transcribe files in the same language as the custom vocabulary. For example, if you create a custom vocabulary using US English (en-US
), you can only apply this custom vocabulary to files that contain English audio.
The date and time the specified vocabulary was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified custom vocabulary was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May 4, 2022.
The processing state of your custom vocabulary. If the state is READY
, you can use the vocabulary in a StartTranscriptionJob
request.
The processing state of your custom vocabulary. If the state is READY
, you can use the custom vocabulary in a StartTranscriptionJob
request.
Provides information about a custom vocabulary, including the language of the vocabulary, when it was last modified, its name, and the processing state.
" + "documentation": "Provides information about a custom vocabulary, including the language of the custom vocabulary, when it was last modified, its name, and the processing state.
" }, "VocabularyName": { "type": "string", diff --git a/clients/all.d.ts b/clients/all.d.ts index 5da545bde2..78472a5ede 100644 --- a/clients/all.d.ts +++ b/clients/all.d.ts @@ -322,3 +322,4 @@ export import Scheduler = require('./scheduler'); export import ChimeSDKVoice = require('./chimesdkvoice'); export import IoTRoboRunner = require('./iotroborunner'); export import SsmSap = require('./ssmsap'); +export import OAM = require('./oam'); diff --git a/clients/all.js b/clients/all.js index 760c78f707..a0fab12b1c 100644 --- a/clients/all.js +++ b/clients/all.js @@ -323,5 +323,6 @@ module.exports = { Scheduler: require('./scheduler'), ChimeSDKVoice: require('./chimesdkvoice'), IoTRoboRunner: require('./iotroborunner'), - SsmSap: require('./ssmsap') + SsmSap: require('./ssmsap'), + OAM: require('./oam') }; \ No newline at end of file diff --git a/clients/backup.d.ts b/clients/backup.d.ts index 5d4da19a95..90ad2119aa 100644 --- a/clients/backup.d.ts +++ b/clients/backup.d.ts @@ -11,6 +11,14 @@ declare class Backup extends Service { */ constructor(options?: Backup.Types.ClientConfiguration) config: Config & Backup.Types.ClientConfiguration; + /** + * This action removes the specified legal hold on a recovery point. This action can only be performed by a user with sufficient permissions. + */ + cancelLegalHold(params: Backup.Types.CancelLegalHoldInput, callback?: (err: AWSError, data: Backup.Types.CancelLegalHoldOutput) => void): Request